seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37530746951
|
from django.core.management.base import BaseCommand
from assessment.models.assessment_model import AssessmentType
class Command(BaseCommand):
help = 'Creates initial Assessment Types'
def handle(self, *args, **options):
# Creating 'Homework' AssessmentType
homework, created = AssessmentType.objects.get_or_create(
name='Homework'
)
if created:
self.stdout.write(self.style.SUCCESS('Successfully created Homework AssessmentType'))
else:
self.stdout.write(self.style.SUCCESS('Homework AssessmentType already exists'))
# Creating 'Test' AssessmentType
test, created = AssessmentType.objects.get_or_create(
name='Test'
)
if created:
self.stdout.write(self.style.SUCCESS('Successfully created Test AssessmentType'))
else:
self.stdout.write(self.style.SUCCESS('Test AssessmentType already exists'))
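# Hedged usage sketch (added for illustration, not part of the original file): because the
# command above relies on get_or_create, it is idempotent and can safely be invoked
# repeatedly, for example from code via Django's call_command.
from django.core.management import call_command
call_command('create_assessment_types')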
|
markoco14/student-mgmt
|
assessment/management/commands/create_assessment_types.py
|
create_assessment_types.py
|
py
| 959 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects.get_or_create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects.get_or_create",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType",
"line_number": 19,
"usage_type": "name"
}
] |
17609833661
|
# encoding: utf-8
from django.urls import reverse
from rest_framework import serializers
from mainsite.serializers import StripTagsCharField
from mainsite.utils import OriginSetting
class ExternalToolSerializerV1(serializers.Serializer):
name = StripTagsCharField(max_length=254)
client_id = StripTagsCharField(max_length=254)
slug = StripTagsCharField(max_length=255, source='entity_id', read_only=True)
def to_representation(self, instance):
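# Extend the default representation with a 'launchpoints' mapping keyed by launchpoint name,
# each entry carrying a fully-qualified launch URL plus its label and icon.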
representation = super(ExternalToolSerializerV1, self).to_representation(instance)
representation['launchpoints'] = {
lp.launchpoint: {
"url": "{}{}".format(OriginSetting.HTTP, reverse("v1_api_externaltools_launch", kwargs=dict(
launchpoint=lp.launchpoint,
slug=lp.cached_externaltool.entity_id
))),
"launch_url": lp.launch_url,
"label": lp.label,
"icon_url": lp.icon_url
} for lp in instance.cached_launchpoints()
}
return representation
class ExternalToolLaunchSerializerV1(serializers.Serializer):
launch_url = serializers.URLField()
def to_representation(self, instance):
representation = super(ExternalToolLaunchSerializerV1, self).to_representation(instance)
requesting_user = self.context['request'].user if 'request' in self.context else None
context_id = self.context.get('tool_launch_context_id', None)
representation['launch_data'] = instance.generate_launch_data(user=requesting_user, context_id=context_id)
return representation
|
reedu-reengineering-education/badgr-server
|
apps/externaltools/serializers_v1.py
|
serializers_v1.py
|
py
| 1,636 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mainsite.utils.OriginSetting.HTTP",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mainsite.utils.OriginSetting",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.URLField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 33,
"usage_type": "name"
}
] |
13131048154
|
import psycopg2
import datetime
import time
import sys
import requests
import scipy.io.wavfile
import json
import vokaturi.Vokaturi as Vokaturi
Vokaturi.load("./vokaturi/OpenVokaturi-3-3-linux64.so")
from ms_text_analysis import *
from cassandra_test import *
MSSp = MSSpeechToText()
MST = MSTextAnalysis()
MSAD = MSAnomalyDetector()
CApi = CassandraAPI()
def get_emotions(filename):
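# Read the WAV file, normalise (and average stereo) samples into Vokaturi's buffer,
# and return the five emotion probabilities as formatted strings.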
(sample_rate, samples) = scipy.io.wavfile.read(filename)
buffer_length = len(samples)
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:
c_buffer[:] = samples[:] / 32768.0
else:
c_buffer[:] = 0.5 * (samples[:, 0] + samples[:, 1]) / 32768.0
voice = Vokaturi.Voice(sample_rate, buffer_length)
voice.fill(buffer_length, c_buffer)
quality = Vokaturi.Quality()
emotionProbabilities = Vokaturi.EmotionProbabilities()
voice.extract(quality, emotionProbabilities)
voice.destroy()
if quality.valid:
return ("%.3f" % emotionProbabilities.neutrality,
"%.3f" % emotionProbabilities.happiness,
"%.3f" % emotionProbabilities.sadness,
"%.3f" % emotionProbabilities.anger,
"%.3f" % emotionProbabilities.fear)
else: raise RuntimeError('bad quality')
DB_NAME = "defaultdb"
USER = "avnadmin"
PASSWORD = "hq3fi662tthholn2"
HOST = "pg-2e774192-dimak24-5fb9.aivencloud.com"
PORT = "21756"
INFLUXDB_HOST = "influx-1ab60b47-dimak24-5fb9.aivencloud.com"
INFLUXDB_PORT = "21756"
INFLUXDB_DB_NAME = "defaultdb"
INFLUXDB_USER = "avnadmin"
INFLUXDB_PASSWORD = "e6gkm3n9bmvcbpfb"
def _execute_op(operation):
conn = psycopg2.connect(
database=DB_NAME,
user=USER,
password=PASSWORD,
host=HOST,
port=PORT)
cur = conn.cursor()
try:
res = operation(cur)
conn.commit()
return res
except psycopg2.Error as e:
print(e)
# raise
finally:
cur.close()
conn.close()
def _execute(*args):
_execute_op(lambda cur: cur.execute(*args))
def _execute_fetch(*args):
def _op(cur):
cur.execute(*args)
return cur.fetchall()
return _execute_op(_op)
def _influxdb_query(query):
return json.loads(requests.post(f'https://{INFLUXDB_USER}:{INFLUXDB_PASSWORD}@{INFLUXDB_HOST}:{INFLUXDB_PORT}/query?db={INFLUXDB_DB_NAME}',
data='q=' + query, headers={'content-type': 'application/x-www-form-urlencoded'}).text)
def _influxdb_write(measurement, args):
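# Build an InfluxDB line-protocol payload ('<measurement>,<tags> value=<field> <timestamp ns>')
# and POST it to the /write endpoint.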
query = ', '.join([','.join([f'{tag["name"]}={tag["value"]}' for tag in arg['tags']]) + f' value={arg["value"]}' for arg in args])
return requests.post(f'https://{INFLUXDB_USER}:{INFLUXDB_PASSWORD}@{INFLUXDB_HOST}:{INFLUXDB_PORT}/write?db={INFLUXDB_DB_NAME}',
data=f'{measurement},{query} {int(time.time() * 1e9)}',
headers={'content-type': 'application/x-www-form-urlencoded'}).text
def cassandra_insert(u_id, timestamp, filename, comment='comment'):
with open(filename, 'rb') as file:
print(CApi.db_execute("""INSERT INTO cycling.records (u_d,r_time,audio,comment)
VALUES(%s,%s,%s,%s)""",
(str(u_id), timestamp, file.read(), comment)))
def load_record(u_id, timestamp):
result_set = CApi.db_query("SELECT * FROM cycling.records where u_d=%s and r_time=%s ALLOW FILTERING;", (u_id, int(timestamp)))
for res in result_set: return res.audio
def create_tables():
_execute('''CREATE TABLE diary
(u_id INT NOT NULL,
r_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
script TEXT NOT NULL,
score REAL NOT NULL,
is_anomaly INT NOT NULL,
neutrality REAL NOT NULL,
happiness REAL NOT NULL,
sadness REAL NOT NULL,
anger REAL NOT NULL,
fear REAL NOT NULL);''')
_execute('''CREATE TABLE users
(u_id SERIAL,
name CHAR(30) NOT NULL,
age INT);''')
_execute('''CREATE TABLE avatars
(u_id INT NOT NULL,
name CHAR(50) NOT NULL,
level INT NOT NULL DEFAULT 0);''')
CApi.db_execute("""CREATE KEYSPACE cycling WITH REPLICATION = {
'class' : 'SimpleStrategy',
'replication_factor' : 1
};""")
CApi.db_execute("""CREATE TABLE records (
u_d text PRIMARY KEY,
r_time int,
audio blob,
comment text );""")
# debug
def drop_tables():
return
CApi.db_execute('DROP TABLE records;')
_execute('DROP TABLE diary, users, avatars;')
_influxdb_query('DROP MEASUREMENT mental_metrics')
def create_user(name, age=None, avatar_name=None):
if avatar_name is None:
avatar_name = f'{name}\'s avatar'
assert len(name) <= 30
assert len(avatar_name) <= 50
if age is not None:
res = _execute_fetch('''INSERT INTO users (name, age)
VALUES (%s, %s) RETURNING u_id;''', (name, age))
else:
res = _execute_fetch('''INSERT INTO users (name)
VALUES (%s) RETURNING u_id;''', (name,))
u_id = res[0][0]
_execute('''INSERT INTO avatars (u_id, name)
VALUES (%s, %s);''', (u_id, avatar_name))
return u_id
def to_timestamp(influxdb_date):
d, t = influxdb_date[:-1].split('T')
h, m, s = t.split(':')
s = int(s.split('.')[0])
h = int(h) + 3
return int(datetime.datetime(*list(map(int, d.split('-'))), h, int(m), s).strftime("%s"))
def to_azure(timestamp):
_date = datetime.date.fromtimestamp(timestamp)
return f'{_date.year}-{_date.month}-{_date.day}T12:00:00Z'
def make_daily_series(series):
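# Collapse raw measurements into one averaged value per calendar day; gaps longer than
# two days are padded with the mean of the neighbouring daily values.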
s, n, last = 0, 0, None
res = []
for record in sorted(series, key=lambda _record: to_timestamp(_record[0])) + [(-1, -1)]:
timestamp, metric = record
if timestamp != -1:
date = int(datetime.date.fromtimestamp(to_timestamp(timestamp)).strftime("%s"))
else: date = -2
if date != last:
if last is not None:
s /= n
if len(res) > 0: mean = (s + res[-1][1]) / 2
while len(res) > 0 and date - res[-1][0] > 86400 * 2:
res.append([res[-1][0] + 86400, mean])
res.append([last, s])
last = date
n, s = 0, 0
s += metric
n += 1
for i in range(len(res)): res[i][0] = to_azure(res[i][0])
return res
def insert_record(u_id, data_file, date=None):
if date is None: date=int(time.time())
text = MSSp.process(data_file)
score = MST.process(text)
metrics = get_emotions(data_file)
for type, value in zip(['neutrality', 'happiness', 'sadness', 'anger', 'fear', 'score'],
metrics + (score,)):
_influxdb_write('mental_metrics',
[{'tags': [
{'name': 'u_id', 'value': u_id},
{'name': 'type', 'value': type}],
'value': value}])
res = _influxdb_query('SELECT "time","value" FROM "mental_metrics" WHERE u_id=\'%s\' AND type=\'score\''%u_id)
try:
series = res['results'][0]['series'][0]['values']
series = make_daily_series(series)
if len(series) < 12: is_anomaly = 0
else: is_anomaly = MSAD.process(series)
except:
print(res)
raise
new_level = _execute_fetch('''UPDATE avatars SET level = level + 1 WHERE u_id = %s
RETURNING level;''', (u_id,))[0][0]
print(u_id, date)
cassandra_insert(u_id, date, data_file)
return _execute_fetch('''INSERT INTO diary (u_id, r_time, script, score, is_anomaly, neutrality, happiness, sadness, anger, fear)
VALUES (%s, to_timestamp(%s), %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING score, is_anomaly, neutrality, happiness, sadness, anger, fear;''',
(u_id, date, text, score, is_anomaly, *metrics))[0] + (int(new_level), text)
def get_records(u_id, date_from=None, date_to=None, phrase=None):
date_range = ''
if date_from is not None:
date_range += f" AND r_time >= to_timestamp('{date_from}', 'yyyy-mm-dd')"
if date_to is not None:
date_range += f" AND r_time < to_timestamp('{date_to}', 'yyyy-mm-dd')"
if phrase is not None:
return _execute_fetch(f"""SELECT r_time, script FROM diary
WHERE u_id = {u_id} {date_range} AND
data LIKE '%{phrase}%'""")
return _execute_fetch(f"""SELECT r_time, script FROM diary
WHERE u_id = {u_id} {date_range}""")
def get_audio(u_id, timestamp):
return load_record(u_id, timestamp)
|
raid-7/SmartDiary
|
backend/main.py
|
main.py
|
py
| 9,127 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "vokaturi.Vokaturi.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "scipy.io.wavfile.io.wavfile.read",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.io",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.SampleArrayC",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.Voice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.Quality",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.EmotionProbabilities",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "psycopg2.connect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.date.fromtimestamp",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.fromtimestamp",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 227,
"usage_type": "call"
}
] |
42896231712
|
import math
from functools import partial
from typing import Any, Callable
import jax
import jax.numpy as jnp
from chex import ArrayTree
from jax import tree_map, vmap
from jax.scipy.special import logsumexp
from ..resamplings import multinomial
STATE = Any
@partial(jax.jit, static_argnums=(2, 3, 4), donate_argnums=(0, 1))
def operator(inputs_a: STATE, inputs_b: STATE, log_weight_fn: Callable[[ArrayTree, ArrayTree, Any], float],
n_samples: int, last_step: bool):
"""
Operator corresponding to the stitching operation of the conditional dSMC algorithm.
Parameters
----------
inputs_a: STATE
A tuple of three arguments.
First one is the state of the partial dSMC smoother to the left of the current time step.
Second are the jax random keys used for resampling at the time steps to the left of the current time step.
Third are the parameters used to compute the mixing weights to the left of the current time step.
inputs_b: STATE
Same as `inputs_a` but to the right of the current time step
log_weight_fn: callable
Function that computes the un-normalised stitching N^2 weights, first argument is x_{t-1}, second is x_t, and
third is the parameters.
It will be automatically batched so only needs to be expressed elementwise
n_samples: int
Number of samples in the resampling
last_step: bool
Whether we are at the last time step or not. If so, we only need one trajectory.
Returns
-------
"""
# Unpack the states
state_a, keys_a, params_a = inputs_a
state_b, keys_b, params_b = inputs_b
trajectories_a, log_weights_a, origins_a = state_a
trajectories_b, log_weights_b, origins_b = state_b
weights = get_weights_batch(trajectories_a, log_weights_a,
trajectories_b, log_weights_b, params_b,
log_weight_fn)
if last_step:
# If last step
idx = jax.random.choice(keys_b[0], n_samples ** 2, p=jnp.ravel(weights))
l_idx, r_idx = jnp.unravel_index(idx, (n_samples, n_samples))
else:
idx = multinomial(keys_b[0], jnp.ravel(weights), n_samples)
l_idx, r_idx = jax.vmap(jnp.unravel_index, in_axes=[0, None])(idx, (n_samples, n_samples))
return _gather_results(l_idx, r_idx, n_samples,
trajectories_a, origins_a, log_weights_a, keys_a, params_a,
trajectories_b, origins_b, log_weights_b, keys_b, params_b)
def _gather_results(left_idx, right_idx, n_samples,
trajectories_a, origins_a, log_weights_a, keys_a, params_a,
trajectories_b, origins_b, log_weights_b, keys_b, params_b):
# If we are using conditional dSMC, we need to make sure to preserve the first trajectory.
# Resample the trajectories
trajectories_a = tree_map(lambda z: jnp.take(z, left_idx, 1), trajectories_a)
trajectories_b = tree_map(lambda z: jnp.take(z, right_idx, 1), trajectories_b)
# Keep track of the trajectories origins for analysis down the line (not used in the algo)
origins_a = jnp.take(origins_a, left_idx, 1)
origins_b = jnp.take(origins_b, right_idx, 1)
# Gather the results
keys = jnp.concatenate([keys_a, keys_b])
params = tree_map(lambda a, b: jnp.concatenate([a, b]), params_a, params_b)
origins = jnp.concatenate([origins_a, origins_b])
trajectories = tree_map(lambda a, b: jnp.concatenate([a, b]), trajectories_a, trajectories_b)
log_weights = jnp.concatenate([jnp.full_like(log_weights_a, -math.log(n_samples)),
jnp.full_like(log_weights_b, -math.log(n_samples))])
return (trajectories, log_weights, origins), keys, params
def get_weights_batch(trajectories_a, log_weights_a,
trajectories_b, log_weights_b, params_b,
log_weight_fn: Callable[[ArrayTree, ArrayTree, Any], float]):
# House keeping to get the required inputs.
params_t = tree_map(lambda z: z[0], params_b)
x_t_1 = tree_map(lambda z: z[-1], trajectories_a)
x_t = tree_map(lambda z: z[0], trajectories_b)
log_w_t_1 = log_weights_a[-1]
log_w_t = log_weights_b[0]
log_weights = get_log_weights(x_t_1, log_w_t_1,
x_t, log_w_t, params_t,
log_weight_fn)
ell_inc = logsumexp(log_weights)
weights = jnp.exp(log_weights - ell_inc)
return weights
def get_log_weights(x_t_1, log_w_t_1,
x_t, log_w_t, params_t,
log_weight_fn):
# House keeping to get the required inputs.
# This nested vmap allows log_weight_fn to be defined more easily at the API level: it turns an
# elementwise formula into a function producing all N^2 pairwise values.
# If log_weight_fn = lambda u, v, _: u * v, then this corresponds to np.outer.
vmapped_log_weight_fn = vmap(vmap(log_weight_fn,
in_axes=[None, 0, None], out_axes=0),
in_axes=[0, None, None], out_axes=0)
log_weight_increment = vmapped_log_weight_fn(x_t_1, x_t, params_t) # shape = M, N
# Take the corresponding time step and reshape to allow for adding residual weights in parallel
log_weights = log_weight_increment + log_w_t_1[:, None] + log_w_t[None, :]
return log_weights
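# Hedged illustration (added, not part of the original module) of the nested-vmap pattern used
# in get_log_weights: an elementwise function becomes an (M, N) "outer" evaluation, analogous
# to np.outer for plain multiplication. The names below are made up for the example.
import jax.numpy as jnp
from jax import vmap
elementwise = lambda u, v, p: u * v + p
outer_eval = vmap(vmap(elementwise, in_axes=[None, 0, None], out_axes=0),
in_axes=[0, None, None], out_axes=0)
print(outer_eval(jnp.arange(3.0), jnp.arange(4.0), 0.0).shape)  # (3, 4)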
|
AdrienCorenflos/aux-ssm-samplers
|
aux_samplers/_primitives/csmc/pit/operator.py
|
operator.py
|
py
| 5,444 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "typing.Any",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "chex.ArrayTree",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "jax.random.choice",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "jax.random",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy.ravel",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "jax.numpy.unravel_index",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "resamplings.multinomial",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "jax.numpy.ravel",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "jax.vmap",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "jax.numpy.unravel_index",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jax.jit",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "jax.tree_map",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "jax.numpy.take",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jax.numpy.take",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "jax.numpy.take",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "jax.numpy.take",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "jax.numpy.full_like",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "jax.numpy.full_like",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "math.log",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "chex.ArrayTree",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "jax.tree_map",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "jax.tree_map",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "jax.scipy.special.logsumexp",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "jax.numpy.exp",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "jax.vmap",
"line_number": 119,
"usage_type": "call"
}
] |
21247144104
|
from datetime import timedelta
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.sensors.filesystem import FileSensor
from directories import (
VOLUME_PATH, LAST_MODEL_DIR, PREDICTIONS_DIR,
RAW_DATA_DIR, PROCESSED_DATA_DIR, START_DATE,
PROD_DATE,
)
default_args = {
"owner": "airflow",
"email": ["[email protected]"],
"email_on_failure": True,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
with DAG(
"predict_daily",
default_args=default_args,
schedule_interval="@daily",
start_date=START_DATE,
) as dag:
start_task = DummyOperator(task_id="start-predict")
wait_data = FileSensor(
task_id="wait-data",
filepath="./data/raw/{{ ds }}/data.csv",
poke_interval=10,
retries=100,
)
wait_model = FileSensor(
task_id="wait-model",
filepath=f"./data/models/{PROD_DATE}/model.pkl",
poke_interval=10,
retries=100,
)
preprocess_cmd = (
f" --input-dir {RAW_DATA_DIR}"
f" --output-dir {PROCESSED_DATA_DIR}"
f" --mode eval"
)
preprocess = DockerOperator(
image="airflow-preprocess",
task_id="docker-preprocess",
command=preprocess_cmd,
network_mode="bridge",
do_xcom_push=False,
volumes=[f"{VOLUME_PATH}:/data"],
)
predict_cmd = (
f" --input-dir {PROCESSED_DATA_DIR}"
f" --model-dir {LAST_MODEL_DIR}"
f" --output-dir {PREDICTIONS_DIR}"
)
predict = DockerOperator(
image="airflow-predict",
task_id="docker-predict",
command=predict_cmd,
network_mode="bridge",
do_xcom_push=False,
volumes=[f"{VOLUME_PATH}:/data"],
)
end_task = DummyOperator(task_id="end-predict")
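# Wire up the DAG: both sensors must succeed before preprocessing, which then feeds prediction.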
start_task >> [wait_data, wait_model] >> preprocess >> predict >> end_task
|
made-ml-in-prod-2021/truengineer
|
airflow_ml_dags/dags/predict_daily.py
|
predict_daily.py
|
py
| 1,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "directories.START_DATE",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "airflow.operators.dummy.DummyOperator",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "airflow.sensors.filesystem.FileSensor",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "airflow.sensors.filesystem.FileSensor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "directories.PROD_DATE",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "directories.RAW_DATA_DIR",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "directories.PROCESSED_DATA_DIR",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "airflow.providers.docker.operators.docker.DockerOperator",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "directories.VOLUME_PATH",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "directories.PROCESSED_DATA_DIR",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "directories.LAST_MODEL_DIR",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "directories.PREDICTIONS_DIR",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "airflow.providers.docker.operators.docker.DockerOperator",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "directories.VOLUME_PATH",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "airflow.operators.dummy.DummyOperator",
"line_number": 76,
"usage_type": "call"
}
] |
37598101859
|
import pytz
from sqlalchemy.orm import Session
import models
import schemas
def create_module_build(db: Session, build: schemas.ModuleBuild):
print(build)
db_build = models.ModuleBuild(
module=build.module,
build_time=build.build_time,
result=build.result,
finished_at=build.finished_at.astimezone(pytz.utc),
maven_opts=build.maven_opts,
uname=build.uname,
uuid=str(build.uuid),
cpu=build.cpu,
mem=build.mem,
)
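# Stage the row and flush so the generated primary key is populated before the caller commits.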
db.add(db_build)
db.flush()
return db_build
|
fresch/maven-build-tracker
|
crud/CreateBuild.py
|
CreateBuild.py
|
py
| 555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "schemas.ModuleBuild",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.ModuleBuild",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 15,
"usage_type": "attribute"
}
] |
17609874011
|
# encoding: utf-8
from django.core.management import BaseCommand
from issuer.models import BadgeClass
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--limit',
type=int,
help='Number of model instances to process in a batch',
default=1000
)
def handle(self, *args, **options):
model = BadgeClass
processed_count = 0
limit = options['limit']
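# Only badge classes that have an image but no stored image_hash need to be re-saved.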
queryset = model.objects.filter(image_hash='').exclude(image='')
processing = True
while processing:
active_set = queryset[0:limit]
self.stdout.write(str(active_set.query))
if active_set.exists():
for instance in active_set:
instance.save()
self.stdout.write("Calculated initial image_hash for {} #{}: {}".format(
instance.__class__.__name__, instance.pk, instance.image_hash)
)
processed_count += 1
else:
processing = False
self.stdout.write("Finished processing populate_image_hashes for model {}. {} records updated.".format(
model.__name__, processed_count)
)
|
reedu-reengineering-education/badgr-server
|
apps/issuer/management/commands/populate_image_hashes.py
|
populate_image_hashes.py
|
py
| 1,279 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "django.core.management.BaseCommand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeClass",
"line_number": 17,
"usage_type": "name"
}
] |
70818525948
|
import speech_recognition as sr
import multiprocessing as mp
import os
import time
def func(n):
print("Task {} convert successfully".format(n))
speechToText()
time.sleep(2) #simulate processing or server return time
print("Task {} has been done now.".format(n))
def speechToText():
r = sr.Recognizer()
# Read the audio file as the source,
# listen to it, and store the result in the audio variable.
# The path must be correct.
with sr.AudioFile('Sample.wav') as source:
audio = r.listen(source)
# Use exception handling in case the API cannot be accessed successfully.
try:
# use Google speech recognition
text = r.recognize_google(audio)
print(text)
except (sr.UnknownValueError, sr.RequestError):
print('Could not access API, please run it again.')
if __name__ == '__main__':
nums_core = mp.cpu_count()
print("There are {} cores being used now.".format(nums_core))
pool = mp.Pool(nums_core) #use all available cores
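# Dispatch 16 independent speech-to-text tasks asynchronously across the worker pool.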
for i in range(0, 16):
pool.apply_async(func, args=(i,))
pool.close()
pool.join()
|
CHAODENG/Project4
|
project4.py
|
project4.py
|
py
| 1,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "speech_recognition.AudioFile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 39,
"usage_type": "call"
}
] |
40087266458
|
import os
from meteo_ist.models import meteo_data, range_data
from django.utils.dateparse import parse_date
def upload_db(data):
for i in range(0, len(data['datetime'])):
date = parse_date(data['datetime'][i]) # parse string do date format
pp = data['data']['pp'][i]
pres = data['data']['pres'][i]
rad = data['data']['rad'][i]
rh = data['data']['rh'][i]
temp = data['data']['temp'][i]
wd = data['data']['wd'][i]
wg = data['data']['wg'][i]
ws = data['data']['ws'][i]
b = meteo_data(date, pp, pres, rad, rh, temp, wd, wg, ws)
b.save()
|
sandroferreira97/meteo_ist
|
meteo_ist/services.py
|
services.py
|
py
| 632 |
python
|
tr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.utils.dateparse.parse_date",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "meteo_ist.models.meteo_data",
"line_number": 18,
"usage_type": "call"
}
] |
74341979708
|
from collections import deque
count = int(input())
dataDeque = deque(list(range(1, count+1)))
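# Repeatedly discard the top card, then move the next card to the bottom, until one card remains.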
while True:
if len(dataDeque) == 1:
print(dataDeque[0])
break
dataDeque.popleft()
dataDeque.append(dataDeque.popleft())
|
KingPiggy/PS
|
Baekjoon/큐, 덱/2164번 카드2.py
|
2164번 카드2.py
|
py
| 255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
}
] |
197603537
|
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
import aliens_functions as af
# Detect sprite collisions
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Respond to collisions between bullets and aliens."""
# Check whether any bullets have hit aliens.
# If so, remove the corresponding bullets and aliens.
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points * len(aliens)
sb.prep_score()
check_high_score(stats, sb)
# If the whole fleet has been destroyed, move up to a harder level.
start_new_level(ai_settings, screen, stats, sb, ship,
aliens, bullets)
def ship_hit(ai_settings, stats, sb, screen, ship, aliens, bullets):
"""Respond to the ship being hit by an alien."""
if stats.ships_left > 0:
# Decrement ships_left.
stats.ships_left -= 1
# Update the scoreboard.
sb.prep_ships()
# Empty the lists of aliens and bullets.
aliens.empty()
bullets.empty()
# Create a new fleet and center the ship at the bottom of the screen.
af.create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Pause.
sleep(0.5)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
# Other checks
def check_high_score(stats, sb):
"""Check whether a new high score has been set."""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
def start_new_level(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Raise the game's difficulty level."""
if len(aliens) == 0:
# If the whole fleet has been destroyed, go up a level.
bullets.empty()
ai_settings.increase_speed()
# Raise the level.
stats.level += 1
sb.prep_level()
af.create_fleet(ai_settings, screen, ship, aliens)
|
wanwan2qq/alien_invasion
|
collisions_functions.py
|
collisions_functions.py
|
py
| 2,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "aliens_functions.create_fleet",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "aliens_functions.create_fleet",
"line_number": 70,
"usage_type": "call"
}
] |
16312390211
|
from typing import NamedTuple
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class LanguageDataset(NamedTuple):
records: tf.data.Dataset
vocab_size: int
def load(batch_size: int, sequence_length: int) -> LanguageDataset:
"""Load LM1B dataset, returning it and vocab_size."""
ds, ds_info = tfds.load(
'lm1b/subwords32k',
split=tfds.Split.TRAIN,
shuffle_files=True,
with_info=True)
crop_size = sequence_length + 1
ds = ds.repeat()
# Convert the dataset to constant-size int32 tensors.
ds = ds.map(lambda d: tf.cast(d['text'], tf.int32))
ds = ds.map(lambda t: _crop_or_pad(t, crop_size, pad_token=0))
ds = ds.shuffle(batch_size * 10)
# Create the language modeling observation/target pairs and batch them up.
ds = ds.map(lambda t: dict(obs=t[:-1], target=t[1:]))
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
ds = tfds.as_numpy(ds)
return LanguageDataset(ds, ds_info.features['text'].encoder.vocab_size)
def _crop_or_pad(value, size, pad_token):
"""Either crop or pad value to be of size size."""
val_size = tf.size(value)
pad = lambda: tf.pad(value, [[0, size - val_size]], 'CONSTANT', constant_values=pad_token)
return tf.cond(val_size < size, pad, lambda: value[:size])
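# Hedged usage check (added for illustration, assuming the definitions above are in scope and
# TF2 eager execution): _crop_or_pad pads short sequences and crops long ones.
print(_crop_or_pad(tf.constant([1, 2, 3]), 5, pad_token=0).numpy())           # [1 2 3 0 0]
print(_crop_or_pad(tf.constant([1, 2, 3, 4, 5, 6]), 5, pad_token=0).numpy())  # [1 2 3 4 5]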
|
ChrisWaites/data-deletion
|
src/adaptive_deletion/nlp/transformer/dataset.py
|
dataset.py
|
py
| 1,313 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "typing.NamedTuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.data",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tensorflow_datasets.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow_datasets.Split",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2.cast",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.int32",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2.data",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tensorflow_datasets.as_numpy",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2.size",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.pad",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.cond",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 38,
"usage_type": "name"
}
] |
43566450593
|
import requests
from pprint import pprint
import os
SHEET_ENDPOINT = "https://api.sheety.co/a65d37e4e4c4751b050905bbc69d2c13/myFlightDeals/prices"
HEADERS = {
"Authorization":os.environ.get("AUTH"),
"Content-Type":"application/json",
}
USR_ENDPOINT = os.environ.get("SHEET_ENd")
class DataManager:
#This class is responsible for talking to the Google Sheet.
def __init__(self):
self.response = requests.get(url=SHEET_ENDPOINT, headers=HEADERS)
self.response.raise_for_status()
def get_info(self):
data = self.response.json()["prices"]
return data
def update(self,row_id,iata):
changes = {
"price": {
"iataCode":iata,
}
}
edit = requests.put(url=f"{SHEET_ENDPOINT}/{row_id}",json=changes,headers=HEADERS)
edit.raise_for_status()
def get_emails(self):
mail_response = requests.get(url=USR_ENDPOINT,headers=HEADERS)
mail_response.raise_for_status()
mail_data = mail_response.json()["users"]
return mail_data
|
HazorTremz/FlightDealFinder
|
data_manager.py
|
data_manager.py
|
py
| 1,077 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
}
] |
44018209186
|
import numpy as np
from modAL.models import ActiveLearner
from modAL.multilabel import SVM_binary_minimum
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
n_samples = 500
X = np.random.normal(size=(n_samples, 2))
y = np.array([[int(x1 > 0), int(x2 > 0)] for x1, x2 in X])
n_initial = 10
initial_idx = np.random.choice(range(len(X)), size=n_initial, replace=False)
X_initial, y_initial = X[initial_idx], y[initial_idx]
X_pool, y_pool = np.delete(X, initial_idx, axis=0), np.delete(y, initial_idx, axis=0)
learner = ActiveLearner(
estimator=OneVsRestClassifier(LinearSVC()),
query_strategy=SVM_binary_minimum,
X_training=X_initial, y_training=y_initial
)
n_queries = 10
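# Pool-based active learning loop: query the most informative sample, teach it, then remove it from the pool.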
for idx in range(n_queries):
query_idx, query_inst = learner.query(X_pool)
learner.teach(X_pool[query_idx].reshape(1, -1), y_pool[query_idx].reshape(1, -1))
X_pool, y_pool = np.delete(X_pool, query_idx, axis=0), np.delete(y_pool, query_idx, axis=0)
|
modAL-python/modAL
|
tests/example_tests/multilabel_svm.py
|
multilabel_svm.py
|
py
| 981 |
python
|
en
|
code
| 2,058 |
github-code
|
6
|
[
{
"api_name": "numpy.random.normal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "modAL.models.ActiveLearner",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.multiclass.OneVsRestClassifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "modAL.multilabel.SVM_binary_minimum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.delete",
"line_number": 26,
"usage_type": "call"
}
] |
20615061350
|
'''test conf'''
import os
import datetime
from tokenleaderclient.rbac import wfc
from flexflow.configs.config_handler import Configs
from flexflow.dbengines.sqlchemy.models import dbdriver
test_data_path = os.path.join(os.path.dirname(__file__),
os.pardir, 'tests', 'testdata')
test_client_conf_file = os.path.join(test_data_path, 'test_client_configs.yml')
conf_file= os.path.join(test_data_path,'test_flexflow_configs.yml')
print(test_client_conf_file, conf_file)
# must_have_keys_in_yml = {'host_name',
# 'host_port',
# 'ssl',
# 'ssl_settings',
# 'database',
# 'secrets'
# 'celery'
# }
must_have_keys_in_yml = {}
testconf = Configs('flexflow', conf_file=conf_file, must_have_keys_in_yml=must_have_keys_in_yml)
yml = testconf.yml
con_string = dbdriver.get_connection_settings(testconf)
print('con_string', con_string)
test_db_conf = { 'SQLALCHEMY_DATABASE_URI': con_string,
'SQLALCHEMY_TRACK_MODIFICATIONS': False }
testwfc= wfc.WorkFuncContext()
testwfc.username = 'user1'
testwfc.org = 'ITC'
testwfc.orgunit = 'ou1'
testwfc.department = 'dept1'
testwfc.roles = ['role1', ]
testwfc.name = 'wfc1'
testwfc.email = '[email protected]'
testwfc.request_id = 'hhihihhh-890809-jklkk;k-ytfty'
testwfc.time_stamp = datetime.datetime.utcnow()
testwfc.client_address = '10.10.10.10'
tspwfc= wfc.WorkFuncContext()
tspwfc.username = 'TSP1user1'
tspwfc.org = 'TSP1'
tspwfc.orgunit = 'TSP1ou1'
tspwfc.department = 'TSP1dept1'
tspwfc.roles = ['role1', ]
tspwfc.name = 'TSP1wfc1'
tspwfc.email = '[email protected]'
tspwfc.request_id = 'TSP1ihhh-890809-jklkk;k-ytfty'
tspwfc.time_stamp = datetime.datetime.utcnow()
tspwfc.client_address = '10.10.10.10'
ITSSwfc= wfc.WorkFuncContext()
ITSSwfc.username = 'ITSSuser1'
ITSSwfc.org = 'ITC'
ITSSwfc.orgunit = 'ITSS'
ITSSwfc.department = 'ITSSept1'
ITSSwfc.roles = ['role1', ]
ITSSwfc.name = 'ITSSSwfc1'
ITSSwfc.email = '[email protected]'
ITSSwfc.request_id = 'ITSSihhh-890809-jklkk;k-ytfty'
ITSSwfc.time_stamp = datetime.datetime.utcnow()
ITSSwfc.client_address = '10.10.10.10'
MISwfc= wfc.WorkFuncContext()
MISwfc.username = 'MISuser1'
MISwfc.org = 'ITC'
MISwfc.orgunit = 'MIS1'
MISwfc.department = 'MISept1'
MISwfc.roles = ['role1', ]
MISwfc.name = 'MISwfc1'
MISwfc.email = '[email protected]'
MISwfc.request_id = 'MISihhh-890809-jklkk;k-ytfty'
MISwfc.time_stamp = datetime.datetime.utcnow()
MISwfc.client_address = '10.10.10.10'
|
BhujayKumarBhatta/flexflow
|
flexflow/configs/testconf.py
|
testconf.py
|
py
| 2,682 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flexflow.configs.config_handler.Configs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flexflow.dbengines.sqlchemy.models.dbdriver.get_connection_settings",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flexflow.dbengines.sqlchemy.models.dbdriver",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 76,
"usage_type": "attribute"
}
] |
17215982737
|
# coding: utf-8
import cv2
import dlib
import sys
import face_recognition
import numpy as np
import os
def getdemo(face_file_path):
# Load the face detection model
print("Currently processing image:", face_file_path)
predicter_path ='shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
# Load the facial-landmark prediction model
sp = dlib.shape_predictor(predicter_path)
# Read the image
bgr_img=cv2.imdecode(np.fromfile(face_file_path,dtype=np.int8),-1)
# bgr_img = cv2.imread(face_file_path)
if bgr_img is None:
print("Sorry, we could not load '{}' as an image".format(face_file_path))
return
# OpenCV's colour space is BGR; convert to RGB before passing to dlib
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
# bgr_img = cv2.imread(face_file_path)
if(rgb_img.shape[0]<2000):
scale = 3000.0/rgb_img.shape[1]
rgb_img = cv2.resize(rgb_img,(3000,int(rgb_img.shape[0]/(rgb_img.shape[1])*3000)))
# OpenCV's colour space is BGR; convert to RGB before passing to dlib
# rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
# Detect faces in the image
dets = detector(rgb_img, 1)
# (top, right, bottom, left) 803 982 892 892
# (left,top, right, bottom) 892 803 982 892
# Number of detected faces
faceNum = len(dets)
print(faceNum)
if faceNum == 0:
print("Sorry, there were no faces found in '{}'".format(face_file_path))
return
face_locations = []
for det in dets:
face_locations.append((det.top(),det.right(),det.bottom(),det.left()))
faceDic = {}
faceDic['faceNum'] = faceNum
face_landmarks = face_recognition.face_landmarks(rgb_img,face_locations) # 72 points
face_encodings = face_recognition.face_encodings(rgb_img,face_locations)
# Detect the facial landmark points and keep them
faces = dlib.full_object_detections()
for det in dets:
faces.append(sp(rgb_img, det))
# Face alignment
images = dlib.get_face_chips(rgb_img, faces, size=320)
# Display counter; windows/files are named after this counter
image_cnt = 0
# Output the aligned results
for image in images:
image_cnt += 1
cv_rgb_image = np.array(image).astype(np.uint8)# convert to a numpy array first
cv_bgr_image = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)# OpenCV uses BGR, so convert from RGB back to BGR
print("Saving image: " + str(image_cnt)+'.jpg')
cv2.imwrite('./'+str(image_cnt)+'.jpg',cv_bgr_image)
# face_file_path = 'D:/py/My_work/6_27_facebook/mtcnn-keras-master/img1/M/静.jpg'# image to use, placed in the current folder
# face_file_path = '../face/d/静.jpg'# image to use, placed in the current folder
face_file_path = '../face/9.jpg'# image to use, placed in the current folder
getdemo(face_file_path)
print("Finished writing..")
|
u19900101/ImgManage
|
pythonModule/python/saveFace.py
|
saveFace.py
|
py
| 2,892 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dlib.shape_predictor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imdecode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_landmarks",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_encodings",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "dlib.full_object_detections",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dlib.get_face_chips",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 67,
"usage_type": "call"
}
] |
70777898428
|
import torch
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from torch import optim, nn
from DQN import DQN
import torch.nn.functional as F
class Agent:
def __init__(self, input_size, output_size, device='cpu', learning_rate= 0.001, gamma=0.99, epsilon=0.6, epsilon_min=0.01, epsilon_decay=0.9995,batch_size=32,memory_size=100):
self.device = device
self.output_size = output_size
self.policy_net = DQN(input_size, output_size).to(device)
self.target_net = DQN(input_size, output_size).to(device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=learning_rate, weight_decay=0.0001)
self.memory = []
self.batch_size = batch_size
self.memory_size = memory_size
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.lossfn = nn.MSELoss()
self.history = {'loss':[]}
def make_action(self, state):
if np.random.rand() < self.epsilon:
return np.random.choice(self.output_size)
with torch.no_grad():
state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
q_values = self.policy_net(state_tensor)
best_action, best_action_index = torch.max(q_values[0], 1)
action = best_action_index.item()
return action
def make_eval_action(self,state):
with torch.no_grad():
state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
q_values = self.policy_net(state_tensor)
best_action, best_action_index = torch.max(q_values[0], 1)
action = best_action_index.item()
return action
def add_experience(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
if len(self.memory) > self.memory_size:
self.memory.pop(0)
def split_batch(self,batch):
states = []
actions = []
rewards = []
next_states = []
dones = []
for experience in batch:
states.append(self.memory[experience][0])
actions.append(self.memory[experience][1])
rewards.append(self.memory[experience][2])
next_states.append(self.memory[experience][3])
dones.append(self.memory[experience][4])
return np.array(states),np.array(actions),np.array(rewards),np.array(next_states),np.array(dones)
def update_policy(self):
self.policy_net.train()
if len(self.memory) < self.batch_size:
return
batch = np.random.choice(len(self.memory), self.batch_size, replace=False)
states,actions,rewards,next_states,dones = self.split_batch(batch)
state_batch = torch.FloatTensor(states).to(self.device)
action_batch = torch.LongTensor(actions).to(self.device)
reward_batch = torch.FloatTensor(rewards).to(self.device)
next_state_batch = torch.FloatTensor(next_states).to(self.device)
done_batch = torch.FloatTensor(dones).to(self.device)
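# Q(s, a) from the policy network, and the one-step Bellman target from the frozen target network.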
q_values = self.policy_net(state_batch).squeeze(1).gather(1, action_batch.unsqueeze(1)).squeeze(1)
with torch.no_grad():
next_q_values = self.target_net(next_state_batch).squeeze(1)
next_q_values,_ = torch.max(next_q_values,1)
expected_q_values = (next_q_values * self.gamma) * (1 - done_batch) + reward_batch
loss = self.lossfn(q_values, expected_q_values)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
self.history['loss'].append(loss.item())
def update_target(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def store_transition(self, transition):
self.memory.append(transition)
def __len__(self):
return len(self.memory)
def save_model(self):
self.policy_net.save_model()
def update_target_model(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def set_mode_eval(self):
self.policy_net.eval()
def set_mode_train(self):
self.policy_net.train()
|
stefanos50/DQN-Trading-Agent
|
Agent.py
|
Agent.py
|
py
| 4,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "DQN.DQN",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.random.rand",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 82,
"usage_type": "call"
}
] |
33208629801
|
from django.shortcuts import render
from django.views.generic.base import View
from .models import course
from pure_pagination import Paginator, PageNotAnInteger, EmptyPage
# Create your views here.
class CourseListView(View):
def get(self, request):
all_course = course.objects.all()
fav_course = all_course.order_by('-fav_numbers')[:2]
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# sort page
sort = request.GET.get('sort', '')
if sort:
if sort == 'hot':
all_course = all_course.order_by('-fav_numbers')
print('all orgs by students', all_course)
if sort == 'students':
all_course = all_course.order_by('-students')
else:
all_course = all_course.order_by('-add_time')
# paginate
p = Paginator(all_course, 5, request=request)
courses_by_page = p.page(page)
course_number = all_course.count()
return render(request, 'course-list.html', {
'all_course': courses_by_page,
'fav_course': fav_course,
'course_number': course_number,
'sort': sort
})
def post(self, request):
pass
class CourseDetailView(View):
def get(self, request, course_id):
course_info = course.objects.filter(id=course_id)[0]
tag = course_info.tag
print('what tag is ', tag)
course_org = course_info.courseOrg
teacher_num = course_org.teacher_set.all().count()
all_related_courses = course.objects.filter(tag=tag)[:1]
print('all related courses %s' % all_related_courses)
return render(request, 'course-detail.html', {
'course_info': course_info,
'course_org': course_org,
'teacher_num': teacher_num,
'related_courses': all_related_courses
})
|
LittleBirdLiu/MXonline_Task
|
apps/course/views.py
|
views.py
|
py
| 1,955 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.base.View",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.course.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pure_pagination.PageNotAnInteger",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pure_pagination.Paginator",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.course.objects.filter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.course.objects.filter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 53,
"usage_type": "call"
}
] |
18842937658
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import random
from collections import deque
import gym
from gym.envs.registration import register
import math
import DQN as dqn
register(
id='CartPole-v1565',
entry_point='gym.envs.classic_control:CartPoleEnv',
# 'wrapper_config.TimeLimit.max_episode_steps' limits maximum step
tags={'wrapper_config.TimeLimit.max_episode_steps': 10001},
reward_threshold=-100
)
env = gym.make('CartPole-v1565')
# input_size = 4, output_size = 2
input_size = env.observation_space.shape[0]
output_size = env.action_space.n
dis = 0.9
REPLAY_MEMORY = 50000
epsilon = 1.0
epsilon_min = 0.01
epsilon_decay = 0.995
def replay_train(mainDQN, targetDQN, train_batch):
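# For each sampled transition, set the Q-target from the target network's value of the next state,
# then update the main network on the stacked batch of states and targets.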
x_stack = np.empty(0).reshape(0, input_size)
y_stack = np.empty(0).reshape(0, output_size)
for state, action, reward, next_state, done in train_batch:
Q = mainDQN.predict(state)
if done:
Q[0, action] = reward
else:
Q[0, action] = reward + dis * np.max(targetDQN.predict(next_state))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state])
return mainDQN.update(x_stack, y_stack)
def get_copy_var_ops(*, dest_scope_name="target", src_scope_name="main"):
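# Build assign ops that copy the trainable variables of the "main" network into the "target" network.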
op_holder = []
src_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
dest_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
op_holder.append(dest_var.assign(src_var.value()))
return op_holder
def get_epsilon(t):
return max(epsilon_min, min(epsilon, 1.0 - math.log10((t+1) * epsilon_decay)))
def bot_play(mainDQN):
s = env.reset()
reward_sum = 0
done = False
while not done:
env.render()
a = np.argmax(mainDQN.predict(s))
s, reward, done, _ = env.step(a)
reward_sum += reward
print("Total score: {}".format(reward_sum))
def main():
max_episodes = 5000
replay_buffer = deque()
epsilon = 1.0
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, input_size, output_size, name="main")
targetDQN = dqn.DQN(sess, input_size, output_size, name="target")
tf.global_variables_initializer().run()
copy_ops = get_copy_var_ops(dest_scope_name="target", src_scope_name="main")
sess.run(copy_ops)
steps = []
for episode in range(max_episodes):
e = get_epsilon(episode)
done = False
step_count = 0
state = env.reset()
while not done:
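# Epsilon-greedy action selection: random action with probability e, otherwise greedy w.r.t. the main network.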
if np.random.rand(1) < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_state, reward, done, _ = env.step(action)
replay_buffer.append((state, action, reward, next_state, done))
if len(replay_buffer) > REPLAY_MEMORY:
# popleft : return left value and pop it
replay_buffer.popleft()
state = next_state
step_count += 1
if epsilon > epsilon_min:
epsilon *= epsilon_decay
if step_count > 10000:
break
print("Episode: {} steps: {}".format(episode, step_count))
steps.append(step_count)
# if recent 10 episodes' steps mean > 300, break -> success
if len(steps) > 10:
steps.pop(0)
if np.mean(steps, axis=0) > 300:
break
if episode % 10 == 1:
for _ in range(50):
minibatch = random.sample(replay_buffer, 10)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
print("Loss: ", loss)
sess.run(copy_ops)
mainDQN.save()
targetDQN.save()
for _ in range(10):
bot_play(mainDQN)
if __name__ == "__main__":
main()
|
craclog/DQN_Cartpole
|
DQN_Cartpole.py
|
DQN_Cartpole.py
|
py
| 4,122 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "gym.envs.registration.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "math.log10",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 129,
"usage_type": "call"
}
] |
30918805074
|
"""
Template for generic Benchmark Test Case Workflow
"""
import sys
import json
import copy
from datetime import datetime
import pandas as pd
def build_iterator(**kwargs):
"""
For building the iterator of the benchmark
"""
iterator = [(2,'dummy'), (2, 'dummy2'), (4, 'dummy'), (2, 'dummy4')]
return iterator
def run_code(iterator_step, repetitions, stage_bench, **kwargs):
"""
For configuration and execution of the benchmark kernel.
Parameters
----------
iterator_step : tuple
tuple with elements from iterator built from build_iterator.
repetitions : list
number of repetitions for each execution
stage_bench : str
benchmark stage. Only: benchmark, pre-benchmark
kwargs : keyword arguments
for configuration of the benchmark kernel
Returns
_______
metrics : pandas DataFrame
DataFrame with the desired metrics obtained for the integral computation
save_name : string
Desired name for saving the results of the execution
"""
# if n_qbits is None:
# raise ValueError("n_qbits CAN NOT BE None")
if stage_bench not in ['benchmark', 'pre-benchmark']:
raise ValueError(
"Valid values for stage_bench: benchmark or pre-benchmark'")
if repetitions is None:
raise ValueError("samples CAN NOT BE None")
#Here the code for configuring and execute the benchmark kernel
variable_0 = iterator_step[0]
variable_1 = iterator_step[1]
# Loop over the BTC step repetitions
for i in range(repetitions):
continue
metrics = pd.DataFrame()
# Deals with the file names for storing metrics
if stage_bench == 'pre-benchmark':
# Name for storing Pre-Benchmark results
save_name = "pre_benchmark.csv"
if stage_bench == 'benchmark':
# Name for storing Benchmark results
save_name = kwargs.get('csv_results')
#save_name = "pre_benchmark_step_{}.csv".format(n_qbits)
return metrics, save_name
def compute_samples(**kwargs):
"""
This function computes the number of executions of the benchmark
needed to assure an error r with a confidence of alpha
Parameters
----------
kwargs : keyword arguments
For configuring the sampling computation
Returns
_______
samples : pandas DataFrame
DataFrame with the number of executions for each integration interval
"""
#Configuration for sampling computations
#Desired Confidence level
alpha = kwargs.get("alpha", 0.05)
#Code for computing the number of samples for getting the desired
#statistical significance. Depends on the benchmark kernel
samples_ = pd.Series([100, 100])
samples_.name = "samples"
#If user wants limit the number of samples
#Minimum and Maximum number of samples
min_meas = kwargs.get("min_meas", 5)
max_meas = kwargs.get("max_meas", None)
samples_.clip(upper=max_meas, lower=min_meas, inplace=True)
return list(samples_)
def summarize_results(**kwargs):
"""
Create summary with statistics
"""
folder = kwargs.get("saving_folder")
csv_results = kwargs.get("csv_results")
#Code for summarizing the benchmark results. Depends on the
#kernel of the benchmark
results = pd.DataFrame()
return results
class KERNEL_BENCHMARK:
"""
Class for executing a Kernel benchmark
"""
def __init__(self, **kwargs):
"""
Method for initializing the class
"""
#Configuration of the benchmarked algorithm or routine
self.kwargs = kwargs
#Benchmark Configuration
#Repetitions for pre benchmark step
self.pre_samples = self.kwargs.get("pre_samples", 10)
#Saving pre benchmark step results
self.pre_save = self.kwargs.get("pre_save", True)
#For executing or not the pre-benchmark step
self.pre_benchmark = self.kwargs.get("pre_benchmark", True)
#Name for saving the pre benchmark step results
self.save_name = self.kwargs.get("save_name", None)
#Number of qubits
self.list_of_qbits = self.kwargs.get("list_of_qbits", [4])
save_type = self.kwargs.get("save_append", True)
if save_type:
self.save_type = 'a'
else:
self.save_type = 'w'
#Create the iterator
self.iterator = build_iterator(**self.kwargs)
#Configure names for CSV files
self.saving_folder = self.kwargs.get("saving_folder")
self.benchmark_times = self.saving_folder + \
self.kwargs.get("benchmark_times")
self.csv_results = self.saving_folder + \
self.kwargs.get("csv_results")
self.summary_results = self.saving_folder + \
self.kwargs.get("summary_results")
#Attributes for metrics
self.pre_metrics = None
self.metrics = None
def save(self, save, save_name, input_pdf, save_mode):
"""
For saving panda DataFrames to csvs
Parameters
----------
save: bool
For saving or not
save_name: str
name for file
input_pdf: pandas DataFrame
save_mode: str
saving mode: overwrite (w) or append (a)
"""
if save:
with open(save_name, save_mode) as f_pointer:
input_pdf.to_csv(
f_pointer,
mode=save_mode,
header=f_pointer.tell() == 0,
sep=';'
)
def exe(self):
"""
Execute complete Benchmark WorkFlow
"""
start_time = datetime.now().astimezone().isoformat()
for step_iterator in self.iterator:
#print("n_qbits: {}".format(n_qbits))
if self.pre_benchmark:
print("\t Executing Pre-Benchmark")
#Pre benchmark step
pre_metrics, pre_save_name = run_code(
step_iterator, self.pre_samples, 'pre-benchmark',
**self.kwargs
)
#For saving pre-benchmark step results
pre_save_name = self.saving_folder + pre_save_name
self.save(self.pre_save, pre_save_name, pre_metrics, "w")
#Using pre benchmark results for computing the number of
#repetitions
self.kwargs.update({"pre_metrics": pre_metrics})
#Compute needed samples for desired
#statistical significance
samples_ = compute_samples(**self.kwargs)
print("\t Executing Benchmark Step")
print("\t step samples: {}".format(samples_))
metrics, save_name = run_code(
step_iterator, samples_, 'benchmark', **self.kwargs
)
save_name = self.saving_folder + save_name
self.save(self.save, save_name, metrics, self.save_type)
end_time = datetime.now().astimezone().isoformat()
pdf_times = pd.DataFrame(
[start_time, end_time],
index=["StartTime", "EndTime"]
).T
#Saving Time Info
pdf_times.to_csv(self.benchmark_times)
#Summarize Results
results = summarize_results(**self.kwargs)
results.to_csv(self.summary_results)
if __name__ == "__main__":
import os
import shutil
benchmark_arguments = {
#Pre benchmark stuff
"pre_benchmark": False,
"pre_samples": 10,
"pre_save": True,
#Saving stuff
"saving_folder": "./Results/",
"benchmark_times": "kernel_times_benchmark.csv",
"csv_results": "kernel_benchmark.csv",
"summary_results": "kernel_SummaryResults.csv",
#Computing Repetitions stuff
"alpha": 0.05,
"min_meas": 5,
"max_meas": 10,
#List number of qubits tested
"list_of_qbits": [4],#, 6, 8],
}
kernel_bench = KERNEL_BENCHMARK(**benchmark_arguments)
kernel_bench.exe()
|
NEASQC/WP3_Benchmark
|
tnbs/templates/my_benchmark_execution.py
|
my_benchmark_execution.py
|
py
| 8,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 239,
"usage_type": "call"
}
] |
5792679797
|
import json
import os
import magic
from io import BytesIO
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from django.db import models
from django.db.models.fields.related import ForeignObjectRel
from ..fields import JSONField
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from hvad.models import TranslatableModel, TranslatedFields
from PIL import Image
class BaseMediaFolder(TranslatableModel):
translations = TranslatedFields(
description=models.CharField(max_length=200, blank=True, null=True),
title=models.CharField(max_length=200, blank=True, null=True),
)
slug = models.SlugField()
creation_date = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
icon = models.ForeignKey(
"camomilla.Media",
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=_("Image cover"),
)
path = models.TextField(blank=True, null=True)
updir = models.ForeignKey(
"self",
on_delete=models.CASCADE,
related_name="child_folders",
null=True,
blank=True,
)
class Meta:
abstract = True
def update_childs(self):
for folder in self.child_folders.all():
folder.save()
def save(self, *args, **kwargs):
if self.updir:
if self.updir.id == self.id:
raise ValidationError({"updir": "Invalid parent"})
self.path = "{0}/{1}".format(self.updir.path, self.slug)
else:
self.path = "/{0}".format(self.slug)
super(BaseMediaFolder, self).save(*args, **kwargs)
self.update_childs()
def __str__(self):
to_string = self.slug
if self.title:
to_string += " - " + self.title
return to_string
class MediaFolder(BaseMediaFolder):
translations = TranslatedFields()
class Media(TranslatableModel):
translations = TranslatedFields(
alt_text=models.CharField(max_length=200, blank=True, null=True),
title=models.CharField(max_length=200, blank=True, null=True),
description=models.TextField(blank=True, null=True),
)
file = models.FileField()
thumbnail = models.ImageField(
upload_to=getattr(settings, "THUMB_FOLDER", "thumbnails"),
max_length=500,
null=True,
blank=True,
)
created = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=200, blank=True, null=True)
size = models.IntegerField(default=0, blank=True, null=True)
mime_type = models.CharField(max_length=128, blank=True, null=True)
image_props = JSONField(default=dict, blank=True)
folder = models.ForeignKey(
MediaFolder,
null=True,
blank=True,
related_name="media_folder",
on_delete=models.CASCADE,
)
@property
def path(self):
return "%s/%s" % (self.folder.path, self.name)
@property
def is_image(self):
return self.mime_type and self.mime_type.startswith("image")
def image_preview(self):
if self.file:
return mark_safe('<img src="{0}" />'.format(self.file.url))
def image_thumb_preview(self):
if self.thumbnail:
return mark_safe('<img src="{0}" />'.format(self.thumbnail.url))
image_preview.short_description = _("Preview")
image_thumb_preview.short_description = _("Thumbnail")
class Meta:
ordering = ["-pk"]
def regenerate_thumbnail(self):
if self.file:
self._make_thumbnail()
def get_foreign_fields(self):
return [
field.get_accessor_name()
for field in self._meta.get_fields()
if issubclass(type(field), ForeignObjectRel)
]
@property
def json_repr(self):
json_r = {
"id": self.pk,
"thumbnail": "" if not self.is_image else self.thumbnail.url,
"label": self.__str__(),
}
return json.dumps(json_r)
def _make_thumbnail(self):
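# Detect the file's MIME type, record basic image properties, and store a small PNG thumbnail in the thumbnail field.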
try:
fh = storage.open(self.file.name, "rb")
self.mime_type = magic.from_buffer(fh.read(2048), mime=True)
except FileNotFoundError as ex:
print(ex)
self.image_props = {}
self.mime_type = ""
return False
try:
orig_image = Image.open(fh)
image = orig_image.copy()
self.image_props = {
"width": orig_image.width,
"height": orig_image.height,
"format": orig_image.format,
"mode": orig_image.mode,
}
except Exception as ex:
print(ex)
return False
try:
image.thumbnail(
(
getattr(settings, "CAMOMILLA_THUMBNAIL_WIDTH", 50),
getattr(settings, "CAMOMILLA_THUMBNAIL_HEIGHT", 50),
),
Image.ANTIALIAS,
)
fh.close()
# Path to save to, name, and extension
thumb_name, thumb_extension = os.path.splitext(self.file.name)
thumb_extension = thumb_extension.lower()
thumb_filename = thumb_name + "_thumb" + thumb_extension
temp_thumb = BytesIO()
image.save(temp_thumb, "PNG", optimize=True)
temp_thumb.seek(0)
# Load a ContentFile into the thumbnail field so it gets saved
self.thumbnail.save(
thumb_filename, ContentFile(temp_thumb.read()), save=False
)
temp_thumb.close()
except Exception:
return False
return True
def _remove_file(self):
if self.file:
file_to_remove = os.path.join(settings.MEDIA_ROOT, self.file.name)
if os.path.isfile(file_to_remove):
os.remove(file_to_remove)
def _remove_thumbnail(self):
if self.thumbnail:
file_to_remove = os.path.join(settings.MEDIA_ROOT, self.thumbnail.name)
if os.path.isfile(file_to_remove):
os.remove(file_to_remove)
def _get_file_size(self):
if self.file:
file_to_calc = os.path.join(settings.MEDIA_ROOT, self.file.name)
if os.path.isfile(file_to_calc):
return self.file.size
else:
return 0
def __str__(self):
if self.name:
return self.name
return self.file.name
@receiver(post_save, sender=Media, dispatch_uid="make thumbnails")
def update_media(sender, instance, **kwargs):
instance._remove_thumbnail()
instance._make_thumbnail()
Media.objects.filter(pk=instance.pk).update(
size=instance._get_file_size(),
thumbnail=instance.thumbnail,
mime_type=instance.mime_type,
image_props=instance.image_props,
)
@receiver(pre_delete, sender=Media, dispatch_uid="make thumbnails")
def delete_media_files(sender, instance, **kwargs):
instance._remove_thumbnail()
instance._remove_file()
|
lotrekagency/camomilla
|
camomilla/models/media.py
|
media.py
|
py
| 7,378 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "hvad.models.TranslatableModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "hvad.models.TranslatableModel",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.models.FileField",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "fields.JSONField",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "django.db.models.fields.related.ForeignObjectRel",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "json.dumps",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.open",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "magic.from_buffer",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 168,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.core.files.base.ContentFile",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 221,
"usage_type": "argument"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_delete",
"line_number": 233,
"usage_type": "argument"
}
] |
31180641489
|
import dash
import math
from flask import Markup
from flask import render_template
import matplotlib.pyplot as plt
from flask import Flask, jsonify, request
from dash.dependencies import Output, Event, Input
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
import plotly.graph_objs as go
from collections import deque
import sqlite3
import pandas as pd
app=Flask(__name__)
@app.route('/alltrends',methods=['GET'])
def all_list():
print("inter")
conn = sqlite3.connect('twitter4.db')
c = conn.cursor()
df = pd.read_sql("SELECT * FROM world_trend_data", conn)
out = df.to_json(orient='records')[1:-1].replace('},{', '} {')
print("compl")
print(out)
return out
@app.route('/<string:name>',methods=['GET'])
def bar(name):
conn=sqlite3.connect('twitter4.db')
c=conn.cursor()
pf=pd.read_sql("SELECT name,tweet_volume FROM "+name,conn)
name=list(pf.name)
tweet_volume=list(pf.tweet_volume)
labels=[]
values=[]
#print(tweet_volume)
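# Keep only trends that report a numeric tweet volume (drop NaN entries).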
for x in range(0,len(name)):
if math.isnan(tweet_volume[x])==False:
labels.append(name[x])
values.append(tweet_volume[x])
print(labels)
print(values)
#labels = ["January","February","March","April","May","June","July","August"]
#values = [10,9,8,7,6,4,7,8]
return render_template('bar.html', values=values, labels=labels)
@app.route('/place/<string:name>',methods=['GET'])
def all_data(name):
conn=sqlite3.connect('twitter4.db')
c=conn.cursor()
df=pd.read_sql("SELECT name,query,tweet_volume FROM "+name,conn)
out = df.to_json(orient='records')[1:-1].replace('},{', '} {')
return out
if __name__=='__main__':
app.run(debug=True,port=8080)
|
ravirajsingh-knit/real-time-twitter-sentiment-analysis
|
main task/api2.py
|
api2.py
|
py
| 1,656 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 54,
"usage_type": "call"
}
] |
22426413086
|
from flask import Flask, request, jsonify
import requests
import json
import os
import feedparser
from dotenv import load_dotenv
import random
from datetime import date
load_dotenv()
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def root_post():
print(request)
return jsonify(text="リクエスト成功")
@app.route("/listening", methods=["GET", "POST"])
def hear():
# https://werkzeug.palletsprojects.com/en/0.15.x/wrappers/#werkzeug.wrappers.json.JSONMixin.get_json
request_obj = request.get_json()
if request_obj is None:
return jsonify({})
if "challenge" in request_obj:
challenge_token = request_obj["challenge"]
return jsonify(challenge=challenge_token)
if "event" in request_obj:
event = request_obj["event"]
print(event)
bot_id = event.get("bot_id")
if bot_id == "B010WR2FE2C":
return jsonify({})
endpoint = os.environ["SLACK_WEBHOOK"]
if "text" not in request_obj["event"]:
return jsonify({})
text = request_obj["event"]["text"]
if "<@U010KB4S65R>" not in text:
# Ignore messages that do not mention the bot
return jsonify({})
if "占い" in text or "うらない" in text or "うらなって" in text or "占って":
request_date = date.today().isoformat().replace("-", "/")
fortune_endpoint = (
f"http://api.jugemkey.jp/api/horoscope/free/{request_date}"
)
fortune_res = requests.get(fortune_endpoint)
fortune = fortune_res.json()
if fortune_res.status_code != 200 or fortune is None:
payload = {"text": f"<@{user}> 占いに失敗しました"}
requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
# Equivalent to accessing fortune["horoscope"]["2020/03/28"]
sign_list = fortune.get("horoscope", {}).get(request_date, {})
if sign_list == {}:
payload = {"text": f"<@{user}> 占いに失敗しました"}
requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
messages = [sign["sign"] + ": " + sign["content"] for sign in sign_list]
messages.append("powerd by JugemKey 【PR】原宿占い館 塔里木")
payload = {"text": "\n".join(messages)}
res = requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
user = event["user"]
RSS_URL = "https://b.hatena.ne.jp/hotentry/it.rss"
d = feedparser.parse(RSS_URL)
text = "\n".join(
[f"{entry.title}: {entry.link}" for entry in random.sample(d.entries, 3)]
)
payload = {"text": f"<@{user}> {text}"}
res = requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
|
tamanobi/benri-slackbot
|
index.py
|
index.py
|
py
| 2,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 80,
"usage_type": "call"
}
] |
23561493561
|
import scipy
import datetime
import matplotlib.pyplot as plt
import sys
from loader import DataLoader
import numpy as np
import os
from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from models.refiner import build_refiner
from models.classifier import build_classifier
from models.discriminator import build_discriminator, build_feature_discriminator
from models.encoder import build_encoder
class CGAN():
def __init__(self):
self.img_rows = 128
self.img_cols = 128
self.channels = 3
self.n_features = 128
self.n_classes = 31
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols), n_classes=self.n_classes)
optimizer = Adam(0.0002, 0.5)
self.D_R = build_discriminator(self.img_shape)
self.D_F = build_feature_discriminator(self.n_features)
self.D_R.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.D_F.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.Refiner = build_refiner(self.img_shape, self.channels)
self.Feature = build_encoder(self.img_shape, self.n_features)
self.Classifier = build_classifier(self.n_features, self.n_classes)
self.D_R.trainable = False
self.D_F.trainable = False
self.Classifier.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.Classifier.trainable = False
self.GAN_1 = Sequential()
self.GAN_1.add(self.Refiner)
self.GAN_1.add(self.D_R)
self.GAN_1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.GAN_2 = Sequential()
self.GAN_2.add(self.Refiner)
self.GAN_2.add(self.Feature)
self.GAN_2.add(self.D_F)
self.GAN_2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.GAN_3 = Sequential()
self.GAN_3.add(self.Refiner)
self.GAN_3.add(self.Feature)
self.GAN_3.add(self.Classifier)
self.GAN_3.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
def train(self, epochs, batch_size=1, interval=50):
start_time = datetime.datetime.now()
valid = np.ones((batch_size,))
refined = np.zeros((batch_size,))
for epoch in range(epochs):
for batch_i, (imgs_sim, imgs_target, classes) in enumerate(self.data_loader.load_batch(batch_size)):
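# Refine the simulated images and extract feature embeddings for the simulated, target and refined domains.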
imgs_refined = self.Refiner.predict(imgs_sim)
feature_sim = self.Feature.predict(imgs_sim)
feature_target = self.Feature.predict(imgs_target)
feature_refined = self.Feature.predict(imgs_refined)
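# Train the image-level and feature-level discriminators on real (target) vs refined samples.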
dimg_loss_real = self.D_R.train_on_batch(imgs_target, valid)
dimg_loss_refined = self.D_R.train_on_batch(imgs_refined, refined)
dimg_loss = 0.5 * np.add(dimg_loss_real, dimg_loss_refined)
dfeature_loss_real = self.D_F.train_on_batch(feature_target, valid)
dfeature_loss_refined = self.D_F.train_on_batch(feature_refined, refined)
dfeature_loss = 0.5 * np.add(dfeature_loss_real, dfeature_loss_refined)
class_loss = self.Classifier.train_on_batch(feature_sim, classes)
gan1_loss = self.GAN_1.train_on_batch(imgs_sim, valid)
gan2_loss = self.GAN_2.train_on_batch(imgs_sim, valid)
gan3_loss = self.GAN_3.train_on_batch(imgs_sim, classes)
elapsed_time = datetime.datetime.now() - start_time
print ("[Epoch %d/%d] [targetatch %d/%d] [DR loss: %f] [DF loss: %f] [C loss: %f] [GAN_1 loss %f] [GAN_2 loss %f] [GAN_3 loss %f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
dimg_loss[0],
dfeature_loss[0],
class_loss[0],
gan1_loss[0],
gan2_loss[0],
gan3_loss[0],
elapsed_time))
if batch_i % interval == 0:
self.sample_images(epoch, batch_i)
def sample_images(self, epoch, batch_i):
os.makedirs('output/', exist_ok=True)
r, c = 1, 3
imgs_sim = self.data_loader.load_data(domain="sim", batch_size=1, is_testing=True)
imgs_target = self.data_loader.load_data(domain="target", batch_size=1, is_testing=True)
imgs_refined = self.Refiner.predict(imgs_sim)
gen_imgs = np.concatenate([imgs_sim, imgs_refined, imgs_target])
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['Simulated', 'Refined','Target']
fig, axs = plt.subplots(r, c)
axs[0].imshow(gen_imgs[0])
axs[0].set_title(titles[0])
axs[0].axis('off')
axs[1].imshow(gen_imgs[1])
axs[1].set_title(titles[1])
axs[1].axis('off')
axs[2].imshow(gen_imgs[2])
axs[2].set_title(titles[2])
axs[2].axis('off')
fig.savefig("output/%d_%d.png" % (epoch, batch_i))
plt.close()
if __name__ == '__main__':
cgan = CGAN()
cgan.train(epochs=100, batch_size=8, interval=50)
|
faniyamokhayyeri/C-GAN
|
cgan.py
|
cgan.py
|
py
| 6,395 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "loader.DataLoader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.Adam",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.discriminator.build_discriminator",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.discriminator.build_feature_discriminator",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.refiner.build_refiner",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.encoder.build_encoder",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.classifier.build_classifier",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 146,
"usage_type": "name"
}
] |
12772858510
|
import argparse
import os
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
from .model import (
rnn_regression_model,
rnn_classification_model,
compile_regression_model,
compile_classification_model,
)
from .transform import (
sequence_embedding,
normalize, denormalize,
make_dataset_balanced,
one_hot_encode_classes,
split_train_test_set,
)
from .load import load_rna_structure_dataset, load_rna_nucleotides_dataset
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
parser = argparse.ArgumentParser()
parser.add_argument('learning_type', choices=['regression', 'classification'])
parser.add_argument('rna_type', choices=['mrna', 'trna', 'rrna'])
parser.add_argument('alphabet', choices=['nucleotides', '2d_structure'])
parser.add_argument('--resume', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_epochs', type=int, default=10)
parser.add_argument('--seed', type=int, default=444)
args = parser.parse_args()
learning_type = args.learning_type
rna_type = args.rna_type
alphabet_type = args.alphabet
learning_rate = args.learning_rate
batch_size = args.batch_size
n_epochs = args.n_epochs
resume = args.resume
seed = args.seed
np.random.seed(seed)
if alphabet_type == 'nucleotides':
alphabet = ['A', 'T', 'G', 'C']
else:
alphabet = ['.', '(', ')']
if learning_type == 'regression':
regression(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed)
else:
classification(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed)
def regression(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed):
alphabet_size = len(alphabet)
metadata_path = f'data/tab/{rna_type}.tab'
sequences_folder = 'data/seq/'
output_folder = os.path.join(os.getcwd(), 'saved_models_rnatemp', f'seed_{seed}')
output_path = os.path.join(output_folder, f'{rna_type}_regression.h5')
try:
os.makedirs(output_folder)
except FileExistsError:
pass
log_dir = os.path.join(os.getcwd(), 'summary_log', f'seed_{seed}', 'regression')
try:
os.makedirs(log_dir)
except FileExistsError:
pass
logger.info('Building model')
model = rnn_regression_model(alphabet_size=alphabet_size, n_lstm=2)
compile_regression_model(model, learning_rate=learning_rate)
if resume > 0:
logger.info(f'Resuming from {output_path}')
model.load_weights(output_path)
logger.info('Loading data')
metadata = pd.read_csv(metadata_path, delimiter='\t')
metadata['category'] = metadata['temp.cat']
y, metadata = make_dataset_balanced(
metadata,
output_col='temp',
)
y = y.astype(np.float32)
if 'A' in alphabet:
sequences = load_rna_nucleotides_dataset(metadata, sequences_folder)
else:
sequences = load_rna_structure_dataset(metadata, sequences_folder)
x = sequence_embedding(sequences, alphabet)
logger.info('Split train and test set')
x_train, y_train, x_test, y_test = split_train_test_set(x, y, test_ratio=0.2)
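# Normalize the temperature targets using the mean and standard deviation of the full label set.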
mean, std = np.mean(y), np.std(y)
y_test_norm = normalize(y_test, mean, std)
y_train_norm = normalize(y_train, mean, std)
initial_epoch = 0
epochs = n_epochs
if resume > 0:
initial_epoch = resume
epochs += initial_epoch
logger.info('Training')
model.fit(
x_train,
y_train_norm,
validation_data=(x_test, y_test_norm),
batch_size=batch_size,
epochs=epochs,
initial_epoch=initial_epoch,
verbose=1,
callbacks=[
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
update_freq='epoch',
embeddings_freq=0,
),
],
)
model.save(output_path)
logger.info(f'Model saved to {output_path}')
def classification(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed):
alphabet_size = len(alphabet)
classes = ['psychrophile', 'mesophile', 'thermophile', 'hyperthermophile']
n_classes = len(classes)
metadata_path = f'data/tab/{rna_type}.tab'
sequences_folder = 'data/seq/'
output_folder = os.path.join(os.getcwd(), 'saved_models_rnatemp', f'seed_{seed}')
output_path = os.path.join(output_folder, f'{rna_type}_classification.h5')
try:
os.makedirs(output_folder)
except FileExistsError:
pass
log_dir = os.path.join(os.getcwd(), 'summary_log', f'seed_{seed}', 'classification')
try:
os.makedirs(log_dir)
except FileExistsError:
pass
logger.info('Building model')
model = rnn_classification_model(alphabet_size=alphabet_size, n_classes=n_classes, n_lstm=2)
compile_classification_model(model, learning_rate=learning_rate)
if resume > 0:
logger.info(f'Resuming from {output_path}')
model.load_weights(output_path)
logger.info('Loading data')
metadata = pd.read_csv(metadata_path, delimiter='\t')
metadata['category'] = metadata['temp.cat']
n_entries_per_class = 153
y_str, metadata = make_dataset_balanced(metadata)
y = one_hot_encode_classes(y_str, classes)
if 'A' in alphabet:
sequences = load_rna_nucleotides_dataset(metadata, sequences_folder)
else:
sequences = load_rna_structure_dataset(metadata, sequences_folder)
x = sequence_embedding(sequences, alphabet)
logger.info('Split train and test set')
x_train, y_train, x_test, y_test = split_train_test_set(x, y, test_ratio=0.2)
initial_epoch = 0
epochs = n_epochs
if resume > 0:
initial_epoch = resume
epochs += initial_epoch
logger.info('Training')
model.fit(
x_train,
y_train,
validation_data=(x_test, y_test),
batch_size=batch_size,
epochs=epochs,
initial_epoch=initial_epoch,
verbose=1,
callbacks=[
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
update_freq='epoch',
embeddings_freq=0,
),
],
)
model.save(output_path)
logger.info(f'Model saved to {output_path}')
if __name__ == '__main__':
main()
|
srom/rna_learn
|
rna_learn/archive/rnatemp_main.py
|
rnatemp_main.py
|
py
| 6,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "model.rnn_regression_model",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "model.compile_regression_model",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "model.load_weights",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "transform.make_dataset_balanced",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "load.load_rna_nucleotides_dataset",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "load.load_rna_structure_dataset",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "transform.sequence_embedding",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "transform.split_train_test_set",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "transform.normalize",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "transform.normalize",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "model.fit",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.TensorBoard",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "model.save",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "model.rnn_classification_model",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "model.compile_classification_model",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "model.load_weights",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "transform.make_dataset_balanced",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "transform.one_hot_encode_classes",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "load.load_rna_nucleotides_dataset",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "load.load_rna_structure_dataset",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "transform.sequence_embedding",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "transform.split_train_test_set",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "model.fit",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.TensorBoard",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "model.save",
"line_number": 220,
"usage_type": "call"
}
] |
7029192101
|
import argparse
import time
import os
import cv2
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torch.autograd import Variable
import models_x
class ImageAdaptive3DModel(nn.Module):
def __init__(self, dim=33):
super().__init__()
self.classifier = models_x.Classifier()
self.lut_0 = models_x.Generator3DLUT_identity()
self.lut_1 = models_x.Generator3DLUT_zero()
self.lut_2 = models_x.Generator3DLUT_zero()
self.trilinear_ = models_x.TrilinearInterpolation()
def load_weights(self, lut_weights="pretrained_models/sRGB/LUTs.pth", classifier_weights="pretrained_models/sRGB/classifier.pth"):
assert os.path.exists(lut_weights), "Unable to find lut weights"
assert os.path.exists(classifier_weights), "Unable to find classifier weights"
classifier_state_dict = torch.load(classifier_weights)
self.classifier.load_state_dict(classifier_state_dict)
luts_state_dict = torch.load(lut_weights)
self.lut_0.load_state_dict(luts_state_dict["0"])
self.lut_1.load_state_dict(luts_state_dict["1"])
self.lut_2.load_state_dict(luts_state_dict["2"])
def forward(self, image_input):
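# Predict per-image LUT weights, blend the three learned 3D LUTs, and apply the result with trilinear interpolation.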
pred = self.classifier(image_input).squeeze()
final_lut = pred[0] * self.lut_0.LUT + pred[1] * self.lut_1.LUT + pred[2] * self.lut_2.LUT
combine_A = image_input.new(image_input.size())
combine_A = self.trilinear_(final_lut, image_input)
return combine_A
class ImageAdaptive3DUnpairedModel(nn.Module):
def __init__(self, dim=33):
super().__init__()
self.classifier = models_x.Classifier_unpaired()
self.lut_0 = models_x.Generator3DLUT_identity()
self.lut_1 = models_x.Generator3DLUT_zero()
self.lut_2 = models_x.Generator3DLUT_zero()
def load_weights(self, lut_weights="pretrained_models/sRGB/LUTs_unpaired.pth", classifier_weights="pretrained_models/sRGB/classifier_unpaired.pth"):
assert os.path.exists(lut_weights), "Unable to find lut weights"
assert os.path.exists(classifier_weights), "Unable to find classifier weights"
classifier_state_dict = torch.load(classifier_weights)
self.classifier.load_state_dict(classifier_state_dict)
luts_state_dict = torch.load(lut_weights)
self.lut_0.load_state_dict(luts_state_dict["0"])
self.lut_1.load_state_dict(luts_state_dict["1"])
self.lut_2.load_state_dict(luts_state_dict["2"])
def forward(self, image_input):
pred = self.classifier(image_input).squeeze()
combine_A = pred[0] * self.lut_0(image_input) + pred[1] * self.lut_1(image_input) + pred[2] * self.lut_2(image_input)
# Standardize because paired model returns (LUT, output)
return None, combine_A
def pre_process(image: np.array, device: str) -> torch.tensor:
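# Convert a BGR uint8 image to a normalized RGB float tensor of shape (1, C, H, W) on the target device.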
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.astype(np.float32) / 255.
image = torch.from_numpy(np.ascontiguousarray(np.transpose(image, (2, 0, 1)))).float().unsqueeze(0)
# image = torch.stack([image])
image = image.to(device)
return image
def post_process(output_tensor):
image_rgb = output_tensor.cpu().squeeze().permute(1, 2, 0).numpy()
image_rgb = (image_rgb * 255.0).clip(0, 255).astype(np.uint8)
image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
return image_bgr
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str, required=True, help="Path to input folder containing images")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output folder")
parser.add_argument("--device", type=str, default="cuda:0", help="Device to use e.g. 'cuda:0', 'cuda:1', 'cpu'")
parser.add_argument("--unpaired", action="store_true", help="Evaluate model trained with unpaired data")
args = parser.parse_args()
# Prepare output directory if it doesn't exist
os.makedirs(args.output_dir, exist_ok=True)
# Load model and weights
model = ImageAdaptive3DModel() if not args.unpaired else ImageAdaptive3DUnpairedModel()
model.load_weights()
model.eval()
model.to(args.device)
# Prepare images
image_paths = [os.path.join(args.input_dir, img_path) for img_path in os.listdir(args.input_dir) if img_path[0] != "."]
# Model inference
with torch.no_grad():
description = "Running 3D-LUT..." if not args.unpaired else "Running 3D-LUT(unpaired)..."
for img_path in tqdm(image_paths, total=len(image_paths), desc=description):
in_image = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
model_input = pre_process(in_image, args.device)
_, model_output = model(model_input)
enhanced_image = post_process(model_output)
output_path = os.path.join(args.output_dir, os.path.basename(img_path))
cv2.imwrite(output_path, enhanced_image)
|
shaunhwq/Image-Adaptive-3DLUT
|
demo_3dlut.py
|
demo_3dlut.py
|
py
| 5,091 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models_x.Classifier",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_identity",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models_x.TrilinearInterpolation",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "models_x.Classifier_unpaired",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_identity",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 127,
"usage_type": "call"
}
] |
43969738146
|
#!/usr/bin/env python
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation
from CPT_GFFParser import gffSeqFeature, gffWrite
bottomFeatTypes = ["exon", "RBS", "CDS"]
def makeGffFeat(inFeat, num, recName, identifier):
if inFeat.type == "RBS" or (inFeat.type == "regulatory" and "regulatory_class" in inFeat.qualifiers.keys() and inFeat.qualifiers["regulatory_class"][0] == "ribosome_binding_site"):
inFeat.type = "Shine_Dalgarno_sequence"
if "codon_start" in inFeat.qualifiers.keys():
shift = int(inFeat.qualifiers["codon_start"][0]) - 1
else:
shift = "."
if identifier in inFeat.qualifiers.keys():
name = inFeat.qualifiers[identifier][0] + "." + inFeat.type
if num > 0:
name += "." + str(num)
else:
name = recName + "." + inFeat.type + "." + str(num)
outFeat = gffSeqFeature(inFeat.location, inFeat.type, '', inFeat.strand, name, inFeat.qualifiers, None, None, None, shift, 0, "GbkToGff")
outFeat.qualifiers["ID"] = [name]
return outFeat
def main(inFile, makeMRNA, makeGene, identifier, fastaFile, outFile):
ofh = sys.stdout
if outFile:
ofh = outFile
outRec = []
failed = 0
for rec in SeqIO.parse(inFile, "genbank"):
recID = rec.name
if len(str(rec.seq)) > 0:
seqs_pending_writes = True
outSeq = str(rec.seq)
seqLen = len(outSeq)
locBucket = {}
outFeats = []
topTypeDict = {}
seekingParent = []
geneNum = 0
autoGeneNum = 0
for feat in rec.features:
if identifier not in feat.qualifiers.keys(): #Allow metadata features and other features with no ID (Output warning?) - AJC
if feat.type in bottomFeatTypes:
seekingParent.append([feat, [], []]) # [Feature, all parent candidates, strongest parent candidates]
continue
elif feat.type not in topTypeDict.keys():
topTypeDict[feat.type] = 1
else:
topTypeDict[feat.type] += 1
outFeats.append(makeGffFeat(feat, topTypeDict[feat.type], recID, identifier))
continue
elif feat.qualifiers[identifier][0] not in locBucket.keys():
locBucket[feat.qualifiers[identifier][0]] = []
locBucket[feat.qualifiers[identifier][0]].append(feat)
for locus in locBucket.keys():
minLoc = locBucket[locus][0].location.start
maxLoc = locBucket[locus][0].location.end
for feat in locBucket[locus]:
minLoc = min(minLoc, feat.location.start)
maxLoc = max(maxLoc, feat.location.end)
for x in seekingParent:
if x[0].location.start >= minLoc and x[0].location.end <= maxLoc:
x[1].append(locus)
if x[0].location.start == minLoc or x[0].location.end == maxLoc:
x[2].append(locus)
for x in seekingParent: #Reformat to [Feature, Locus, Unused/Free]
if len(x[2]) == 1:
finList = ""
if len(x[1]) > 1:
for loc in x[1]:
if loc != x[2][0]:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other, weaker candidate(s) were " + finList[0:-2] + "."
else:
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = x[2][0]
elif len(x[2]) > 1:
candidate = x[2][0] #Arbitrarily choose first one
finList = ""
strongList = ""
for loc in x[2]:
if loc != candidate:
finList += loc + ", "
strongList += loc + ", "
for loc in x[1]:
if loc not in x[2]:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other candidate(s) were " + finList[0:-2] + " (Equally strong candidate(s): " + strongList[0:-2] + ")."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = candidate
elif len(x[1]) == 1:
x[1] = x[1][0]
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived."
x[0].qualifiers["Notes"].append(finList)
elif len(x[1]) > 1:
candidate = x[1][0] #Arbitrarily choose first one
finList = ""
for loc in x[1]:
if loc != candidate:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other candidates were " + finList[0:-2] + "."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = candidate
else:
if makeGene:
sys.stderr.write("Warning: Unable to find potential parent for feature with no " + identifier + " of type " + str(x[0].type) + " at location [" + str(x[0].location.start + 1) + ", " + str(x[0].location.end) + "], creating standalone gene.\n")
autoGeneNum += 1
x[0].source = "GbkToGff"
x[0].score = 0
x[0].shift = 0
if "ID" not in x[0].qualifiers.keys():
x[0].qualifiers["ID"] = [recID + ".standalone_" + x[0].type + "." + str(autoGeneNum)]
tempName = recID + ".derived_Gene." + str(autoGeneNum)
tempQuals = {"ID" : [tempName], "Notes" : ["Gene feature automatically generated by Gbk to GFF conversion"]}
tempGene = gffSeqFeature(FeatureLocation(x[0].location.start, x[0].location.end, x[0].location.strand), 'gene', '', x[0].strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
if makeMRNA:
tempName = recID + ".derived_mRNA." + str(autoGeneNum)
tempQuals = {"ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
tempGene.sub_features.append(gffSeqFeature(FeatureLocation(x[0].location.start, x[0].location.end, x[0].location.strand), 'mRNA', '', x[0].strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff"))
tempGene.sub_features[-1].sub_features.append(x[0])
else:
tempGene.sub_features.append(x[0])
outFeats.append(tempGene)
else:
sys.stderr.write("Warning: Unable to find potential parent for feature with no " + identifier + " of type " + str(x[0].type) + " at location [" + str(x[0].location.start + 1) + ", " + str(x[0].location.end) + "].\n")
if x[0].type not in topTypeDict.keys():
topTypeDict[x[0].type] = 1
else:
topTypeDict[x[0].type] += 1
outFeats.append(makeGffFeat(x[0], topTypeDict[x[0].type], recID, identifier))
for locus in locBucket.keys():
if len(locBucket[locus]) == 1: # No hierarchy to be made
outFeats.append(makeGffFeat(locBucket[locus][0], 0, recID, identifier))
continue
topFeat = None
midFeat = None
bottomFeats = []
typeDict = {}
minLoc = locBucket[locus][0].location.start
maxLoc = locBucket[locus][0].location.end
geneNum += 1
for feat in locBucket[locus]:
# If we want to make our own top-level feat?
minLoc = min(minLoc, feat.location.start)
maxLoc = max(maxLoc, feat.location.end)
# Gene->mRNA->CDS included as example, to add other feature hierarchies in the appropriate slot
if feat.type in ['gene']:
if not topFeat:
topFeat = feat
# Else handle multiple top features
elif feat.type in ['mRNA', 'tRNA', 'rRNA']:
if not midFeat:
midFeat = feat
# Else handle multiple mid feats (May need another elif type-in-list statement if we actually expect a list of mid feats)
else:
if feat.type not in typeDict.keys():
typeDict[feat.type] = 1
else:
typeDict[feat.type] += 1
bottomFeats.append(feat)
for x in seekingParent:
if not isinstance(x[1], list) and locus == x[1]:
x[0].qualifiers[identifier] = [locus]
bottomFeats.append(x[0])
if x[0].type not in typeDict.keys():
typeDict[x[0].type] = 1
else:
typeDict[x[0].type] += 1
#if not topFeat: # Make our own top-level feature based off minLoc, maxLoc bounds
for x in typeDict.keys(): # If only 1, set it to 0 so we don't append a number to the name
if typeDict[x] == 1: # Else, set to 1 so that we count up as we encounter the features
typeDict[x] = 0
else:
typeDict[x] = 1
if not topFeat:
if makeGene:
if midFeat:
possibleStrand = midFeat.strand
else:
possibleStrand = bottomFeats[0].strand
tempName = recID + ".gene." + str(geneNum)
tempQuals = {identifier : [locus], "ID" : [tempName], "Notes" : ["Gene feature automatically generated by Gbk to GFF conversion"]}
topFeat = gffSeqFeature(FeatureLocation(minLoc, maxLoc, possibleStrand), 'gene', '', possibleStrand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
else:
sys.stderr.write("Unable to create a feature heirarchy at location [%d, %d] with features: \n" % (minLoc, maxLoc))
for x in locBucket[locus]:
sys.stderr.write(str(x))
sys.stderr.write('\n')
failed = 1
continue
outFeats.append(makeGffFeat(topFeat, 0, recID, identifier))
if not midFeat and topFeat.type == "gene" and makeMRNA:
if identifier in topFeat.qualifiers.keys():
tempName = topFeat.qualifiers[identifier][0] + ".mRNA"
tempQuals = {identifier : topFeat.qualifiers[identifier], "ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
else:
tempName = outFeats[-1].ID + ".mRNA"
tempQuals = {identifier : topFeat.qualifiers[identifier], "ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
midFeat = gffSeqFeature(FeatureLocation(minLoc, maxLoc, topFeat.strand), 'mRNA', '', topFeat.strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
if midFeat: # Again, need a new if statement if we want to handle multiple mid-tier features
outFeats[-1].sub_features.append(makeGffFeat(midFeat, 0, recID, identifier))
outFeats[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].id]
for x in bottomFeats:
typeDict[x.type] += 1
outFeats[-1].sub_features[-1].sub_features.append(makeGffFeat(x, typeDict[x.type], recID, identifier))
outFeats[-1].sub_features[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].sub_features[-1].id]
else: # No midFeat, append bottom feats directly to top feats
for x in bottomFeats:
typeDict[x.type] += 1
outFeats[-1].sub_features.append(makeGffFeat(x, typeDict[x.type], recID, identifier))
outFeats[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].id]
outRec.append(SeqRecord(rec.seq, recID, rec.name, rec.description, rec.dbxrefs, sorted(outFeats, key=lambda x: x.location.start), rec.annotations, rec.letter_annotations))
SeqIO.write([outRec[-1]], fastaFile, "fasta")
gffWrite(outRec, ofh)
exit(failed) # 0 if all features handled, 1 if unable to handle some
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Biopython solution to Gbk to GFF conversion')
parser.add_argument('inFile', type=argparse.FileType("r"), help='Path to an input GBK file' )
parser.add_argument('--makeMRNA', action="store_true", required=False, help="Automatically create mRNA features")
parser.add_argument('--makeGene', action="store_true", required=False, help="Automatically create missing Gene features")
parser.add_argument('--identifier', type=str, default="locus_tag", required=False, help="Qualifier to derive ID property from")
parser.add_argument('--fastaFile', type=argparse.FileType("w"), help='Fasta output for sequences' )
parser.add_argument('--outFile', type=argparse.FileType("w"), help='GFF feature output' )
args = parser.parse_args()
main(**vars(args))
|
TAMU-CPT/galaxy-tools
|
tools/gbk/gbk_to_gff3.py
|
gbk_to_gff3.py
|
py
| 13,589 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "Bio.SeqRecord.SeqRecord",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO.write",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "CPT_GFFParser.gffWrite",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 264,
"usage_type": "call"
}
] |
31235810811
|
from django.urls import path, include
from rest_framework import routers
from aluraflix.views import VideoViewSet, CategoriaViewSet, CategoriaVideosViewSet, VideosFreeViewSet
router = routers.DefaultRouter()
router.register('videos', VideoViewSet, basename='videos')
router.register('categorias', CategoriaViewSet, basename='categorias')
urlpatterns = [
path('videos/free/', VideosFreeViewSet.as_view(), name='videos_free_list'),
path('categorias/<int:id>/videos/', CategoriaVideosViewSet.as_view(), name='videos_categoria_list'),
path('', include(router.urls)),
]
|
diegoamferreira/challange_alura_be1
|
aluraflix/urls.py
|
urls.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "aluraflix.views.VideoViewSet",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "aluraflix.views.CategoriaViewSet",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.VideosFreeViewSet.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.VideosFreeViewSet",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.CategoriaVideosViewSet.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.CategoriaVideosViewSet",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 13,
"usage_type": "call"
}
] |
3919544072
|
# standard python libraries
import os
import re
import csv
import json
import operator
import statistics
import collections
from operator import itemgetter
# custom libraries
from webxray.Analyzer import Analyzer
from webxray.Utilities import Utilities
class Reporter:
"""
Manages the production of a number of CSV reports.
"""
def __init__(self, db_name, db_engine, num_tlds, num_results, tracker_threshold = None, flush_domain_owners = True, start_date = False, end_date = False):
"""
This performs a few start-up tasks:
- sets up some useful global variables
- makes sure we have a directory to store the reports
- flushes the existing domain_owner mappings (this can be disabled)
- if we want to do per-tld reports, figures out the most common
- if we want to filter against a given tracker threshold, sets it
up here (see documentation below for tracker threshold)
"""
# set various global vars
self.db_name = db_name
self.num_tlds = num_tlds
self.num_results = num_results
self.tracker_threshold = tracker_threshold
# pass utilities the database info
self.utilities = Utilities(db_name, db_engine)
# set up the analyzer we will be using throughout
self.analyzer = Analyzer(db_name, db_engine)
# number of decimal places to round to in reports
self.num_decimals = 2
# set up global db connection
if db_engine == 'sqlite':
from webxray.SQLiteDriver import SQLiteDriver
self.sql_driver = SQLiteDriver(db_name)
elif db_engine == 'postgres':
from webxray.PostgreSQLDriver import PostgreSQLDriver
self.sql_driver = PostgreSQLDriver(db_name)
else:
print('INVALID DB ENGINE FOR %s, QUITTING!' % db_engine)
quit()
print('\t=============================')
print('\t Checking Output Directories ')
print('\t=============================')
# creates a new directory if it doesn't exist already
self.report_path = self.utilities.setup_report_dir(self.db_name)
# this is used in various places to get owner information
self.domain_owners = self.utilities.get_domain_owner_dict()
# if we want to get sub-reports for the most frequent tlds we find
# them here
if self.num_tlds:
print('\t=====================')
print('\t Getting top %s tlds' % self.num_tlds)
print('\t=====================')
print('\t\tProcessing...', end='', flush=True)
self.top_tlds = self.analyzer.get_top_tlds(self.num_tlds)
print('done!')
print('\t\tThe top tlds are:')
for tld in self.top_tlds:
if tld: print('\t\t |- %s' % tld)
else:
self.top_tlds = [None]
# __init__
#####################
# REPORT GENERATORS #
#####################
def generate_db_summary_report(self,print_to_cli=True):
"""
outputs and stores report of basic data about how many records in db, etc.
"""
print('\t================')
print('\t General Summary')
print('\t================')
# get the relevant db summary data
db_summary = self.analyzer.get_db_summary()
# print to cli
if print_to_cli:
print("\t\tTotal Crawls:\t\t\t%s" % db_summary['total_crawls_ok'])
print("\t\tTotal Pages:\t\t\t%s" % db_summary['total_pages_ok'])
print("\t\tTotal Tasks Fail:\t\t%s" % db_summary['total_tasks_fail'])
print("\t\tTotal Tasks Attempted:\t\t%s" % db_summary['total_tasks_attempted'])
print("\t\t%% Pages OK:\t\t\t%.2f%%" % db_summary['percent_tasks_ok'])
print("\t\tTotal Errors:\t\t\t%s" % db_summary['total_errors'])
print("\t\tTotal Cookies:\t\t\t%s" % db_summary['total_cookies'])
print("\t\tTotal 3P Cookies:\t\t%s" % db_summary['total_3p_cookies'])
print("\t\tTotal Dom Storage:\t\t%s" % db_summary['total_dom_storage'])
print("\t\tTotal Websockets:\t\t%s" % db_summary['total_websockets'])
print("\t\tTotal Websocket Events:\t\t%s" % db_summary['total_websocket_events'])
print("\t\tTotal Requests:\t\t\t%s" % db_summary['total_requests'])
print("\t\tTotal Responses:\t\t%s" % db_summary['total_responses'])
print('\t\t%% Requests Received:\t\t%.2f%%' % db_summary['percent_requests_received'])
print("\t\t3P Requests:\t\t\t%s" % db_summary['total_3p_requests'])
print("\t\t3P Responses:\t\t\t%s" % db_summary['total_3p_responses'])
print('\t\t%% 3P Requests Received:\t\t%.2f%%' % db_summary['percent_3p_requests_received'])
print('\t\t'+'-'*40)
# write results to csv
csv_rows = []
csv_rows.append(('total_crawls_ok', db_summary['total_crawls_ok']))
csv_rows.append(('total_pages_ok', db_summary['total_pages_ok']))
csv_rows.append(('total_tasks_fail', db_summary['total_tasks_fail']))
csv_rows.append(('total_tasks_attempted', db_summary['total_tasks_attempted']))
csv_rows.append(('percent_pages_ok', db_summary['percent_tasks_ok']))
csv_rows.append(('total_errors', db_summary['total_errors']))
csv_rows.append(('total_cookies', db_summary['total_cookies']))
csv_rows.append(('total_3p_cookies', db_summary['total_3p_cookies']))
csv_rows.append(('total_dom_storage', db_summary['total_dom_storage']))
csv_rows.append(('total_websockets', db_summary['total_websockets']))
csv_rows.append(('total_websocket_events', db_summary['total_websocket_events']))
csv_rows.append(('total_requests', db_summary['total_requests']))
csv_rows.append(('total_responses', db_summary['total_responses']))
csv_rows.append(('percent_requests_received', db_summary['percent_requests_received']))
csv_rows.append(('total_3p_requests', db_summary['total_3p_requests']))
csv_rows.append(('total_3p_responses', db_summary['total_3p_responses']))
csv_rows.append(('percent_3p_requests_received', db_summary['percent_3p_requests_received']))
self.utilities.write_csv(self.report_path,'db_summary.csv', csv_rows)
# generate_db_summary_report
def generate_stats_report(self):
"""
High level stats
"""
print('\t=============================')
print('\t Processing High-Level Stats ')
print('\t=============================')
for tld_filter in self.top_tlds:
csv_rows = []
if tld_filter:
stats = self.analyzer.get_high_level_stats(tld_filter)
else:
stats = self.analyzer.get_high_level_stats()
if self.tracker_threshold:
filter_depth = self.tracker_threshold
else:
filter_depth = 'no_filter_used'
csv_rows.append(('n_pages', stats['total_pages']))
csv_rows.append(('n_crawls', stats['total_crawls']))
csv_rows.append(('%_pages_ssl', stats['percent_pages_ssl']))
csv_rows.append(('n_requests_received', stats['total_requests_received']))
csv_rows.append(('%_requests_received_ssl', stats['percent_requests_ssl']))
csv_rows.append(('n_1p_requests_received', stats['total_requests_received_1p']))
csv_rows.append(('%_1p_requests_received_ssl', stats['percent_1p_requests_ssl']))
csv_rows.append(('n_3p_requests_received', stats['total_requests_received_3p']))
csv_rows.append(('%_3p_requests_received_ssl', stats['percent_3p_requests_ssl']))
csv_rows.append(('average_page_load_time', stats['average_page_load_time']))
csv_rows.append(('%_w/3p_request', stats['percent_w_3p_request']))
csv_rows.append(('%_w/3p_cookie', stats['percent_w_3p_cookie']))
csv_rows.append(('%_w/3p_script', stats['percent_w_3p_script']))
csv_rows.append(('mean_3p_domains', stats['3p_domains_mean']))
csv_rows.append(('median_3p_domains', stats['3p_domains_median']))
csv_rows.append(('mode_3p_domains', stats['3p_domains_mode']))
csv_rows.append(('mean_3p_cookies', stats['3p_cookies_mean']))
csv_rows.append(('median_3p_cookies', stats['3p_cookies_median']))
csv_rows.append(('mode_3p_cookies', stats['3p_cookies_mode']))
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-stats.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'stats.csv',csv_rows)
# generate_stats_report
def generate_aggregated_tracking_attribution_report(self):
"""
generates ranked list of which entities collect data
from the greatest number of crawls ('aggregated_tracking_attribution.csv')
- entities which have subsidiaries are ranked according
to the crawls their subsidiaries get data from as well
- however, parent entities only get one hit on
a crawl which has multiple subsidiaries present
- for example, if a crawl has 'google analytics' and 'doubleclick'
that is only one hit for 'google'
"""
print('\t======================================')
print('\t Processing Aggregated Tracking Report ')
print('\t======================================')
for tld_filter in self.top_tlds:
csv_rows = []
# write out data to csv
for item in self.analyzer.get_aggregated_tracking_attribution(tld_filter):
csv_rows.append((
item['percent_crawls'],
item['owner_name'],
item['owner_country'],
self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
)
)
# we want to first sort by owner name and then by percentage
# to account for cases where two owners have the same percentage value
csv_rows.sort(key=lambda x: x[1].lower())
csv_rows.sort(key=lambda x: x[0],reverse=True)
# insert header row after sort
csv_rows.insert(0, ('percentage_crawls_tracked','owner','owner_country','owner_lineage'))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-aggregated_tracking_attribution.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'aggregated_tracking_attribution.csv',csv_rows)
# generate_aggregated_tracking_attribution_report
def generate_aggregated_3p_ssl_use_report(self):
"""
this report tells us the percentage of requests made to a given
third party that are encrypted
"""
print('\t=========================================')
print('\t Processing Aggregated 3P SSL Use Report ')
print('\t=========================================')
for tld_filter in self.top_tlds:
csv_rows = []
for item in self.analyzer.get_aggregated_3p_ssl_use(tld_filter):
csv_rows.append((
item['ssl_use'],
item['owner_name'],
item['owner_country'],
self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
))
# we want to first sort by owner name and then by percentage
# to account for cases where two owners have the same percentage value
csv_rows.sort(key=lambda x: x[1].lower())
csv_rows.sort(key=lambda x: x[0],reverse=True)
# insert header row after sort
csv_rows.insert(0, ('percent_requests_encrypted','owner','owner_country','owner_lineage'))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_ssl_use.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_ssl_use.csv',csv_rows)
# generate_aggregated_3p_ssl_use_report
def generate_3p_domain_report(self):
"""
This report tells us the most commonly occurring third-party domains.
"""
print('\t==============================')
print('\t Processing 3P Domains Report ')
print('\t==============================')
for tld_filter in self.top_tlds:
csv_rows = []
csv_rows.append(('percent_total','domain','owner','owner_country', 'owner_lineage'))
# get_3p_domain_percentages returns a list, we slice it to get only desired num_results
for item in self.analyzer.get_3p_domain_percentages(tld_filter)[:self.num_results]:
# figure out the lineage string if we know who owns the domain
if item['owner_id'] != None:
lineage_string = self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
else:
lineage_string = None
csv_rows.append((
item['percent_crawls'],
item['domain'],
item['owner_name'],
item['owner_country'],
lineage_string
))
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_domains.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_domains.csv',csv_rows)
# generate_3p_domain_report
def generate_3p_request_report(self,request_type=None):
"""
this queries the db to get all requests, domains, or domain owners;
next they are counted to find the most common,
then formatted into csv rows and returned
"""
if request_type == 'script':
print('\t=============================')
print('\t Processing 3P Script Report ')
print('\t=============================')
else:
print('\t==============================')
print('\t Processing 3P Request Report ')
print('\t==============================')
for tld_filter in self.top_tlds:
csv_rows = []
csv_rows.append(('percent_total','request','type','domain','owner','owner_country','owner_lineage'))
# get_3p_request_percentages returns a list, we slice it to get only desired num_results
for item in self.analyzer.get_3p_request_percentages(tld_filter,request_type)[:self.num_results]:
# figure out the lineage string if we know who owns the domain
if item['request_owner_id'] != None:
lineage_string = self.utilities.get_domain_owner_lineage_combined_string(item['request_owner_id'])
else:
lineage_string = None
csv_rows.append((
item['percent_crawls'],
item['request_url'],
item['request_type'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country'],
lineage_string
))
if tld_filter:
if request_type:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_'+request_type+'.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_request.csv',csv_rows)
else:
if request_type:
self.utilities.write_csv(self.report_path,'3p_'+request_type+'.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_request.csv',csv_rows)
# generate_3p_request_report
def generate_data_transfer_report(self):
"""
These reports tell us how much data was transferred across several dimensions
"""
print('\t==================================')
print('\t Processing Data Transfer Reports ')
print('\t==================================')
for tld_filter in self.top_tlds:
# set up filter and file names
if tld_filter:
summary_file_name = tld_filter+'-data_xfer_summary.csv'
domain_file_name = tld_filter+'-data_xfer_by_domain.csv'
aggregated_file_name = tld_filter+'-data_xfer_aggregated.csv'
else:
summary_file_name = 'data_xfer_summary.csv'
domain_file_name = 'data_xfer_by_domain.csv'
aggregated_file_name = 'data_xfer_aggregated.csv'
# get the data from db, tuple of (response_domain, size, is_3p (boolean), domain_owner_id)
response_sizes = self.sql_driver.get_response_sizes()
# initialize vars
first_party_data = 0
third_party_data = 0
total_data = 0
# need Counter object, allows sorting later
domain_data = collections.Counter()
owner_data = collections.Counter()
# process each row
for item in response_sizes:
response_domain = item[0]
response_size = item[1]
response_is_3p = item[2]
domain_owner_id = item[3]
# this is the measure of all data downloaded
total_data += response_size
# measures for third and first party data
if response_is_3p:
third_party_data += response_size
else:
first_party_data += response_size
# data by domain, increment if already in there, otherwise new entry
if response_domain in domain_data:
domain_data[response_domain] += response_size
else:
domain_data[response_domain] = response_size
# only if we know the owner, increment
if domain_owner_id:
for lineage_id in self.utilities.get_domain_owner_lineage_ids(domain_owner_id):
if lineage_id in owner_data:
owner_data[lineage_id] += response_size
else:
owner_data[lineage_id] = response_size
# avoid divide-by-zero
if total_data == 0:
print('\t\tTotal data is zero, no report')
return
# output data to csv
summary_data_csv = []
summary_data_csv.append(('party','percent_total','data_transfered_bytes'))
summary_data_csv.append(('all','100',total_data))
summary_data_csv.append((
'First',
round((first_party_data/total_data)*100, self.num_decimals),
first_party_data))
summary_data_csv.append((
'Third',
round((third_party_data/total_data)*100, self.num_decimals),
third_party_data))
self.utilities.write_csv(self.report_path,summary_file_name, summary_data_csv)
# sort and output ranked data
domain_data = domain_data.most_common()
domain_data.sort()
domain_data.sort(reverse=True, key=lambda item:item[1])
# for csv data
domain_data_csv = []
domain_data_csv.append(('percent_total','domain','data_transfered_bytes'))
# if num_results is None we get everything, otherwise stops at limit
for item in domain_data[:self.num_results]:
domain_data_csv.append((
round((item[1]/total_data)*100,self.num_decimals),
item[0],
item[1]))
self.utilities.write_csv(self.report_path,domain_file_name, domain_data_csv)
owner_data = self.utilities.get_most_common_sorted(owner_data)
owner_data_csv = []
owner_data_csv.append(('percent_total','owner','owner_country','owner_lineage','data_transfered_bytes'))
# get results for all known owners
for item in owner_data:
owner_data_csv.append((
round((item[1]/total_data)*100,self.num_decimals),
self.domain_owners[item[0]]['owner_name'],
self.domain_owners[item[0]]['country'],
self.utilities.get_domain_owner_lineage_combined_string(item[0]),
item[1]
))
self.utilities.write_csv(self.report_path,aggregated_file_name, owner_data_csv)
# generate_data_transfer_report
def generate_use_report(self):
"""
This function handles the process of generating a csv report which details
what percentage of pages use third-party content for specific uses,
the number of requests made for a given type of use on a per-page basis,
and the percentage of such requests which correspond to a third-party
cookie.
"""
print('\t==========================')
print('\t Processing 3P Use Report ')
print('\t==========================')
for tld_filter in self.top_tlds:
use_data = self.analyzer.get_3p_use_data(tld_filter)
all_uses = use_data['all_uses']
percentage_by_use = use_data['percentage_by_use']
average_use_occurance_per_page = use_data['average_use_occurance_per_crawl']
percentage_use_w_cookie = use_data['percentage_use_w_cookie']
percentage_use_ssl = use_data['percentage_use_ssl']
csv_rows = []
csv_rows.append(('use_category','percent_crawls_w_use','ave_occurances_per_page','percentage_of_use_w_cookie', 'percentage_of_use_ssl'))
for use in sorted(all_uses):
if percentage_by_use[use] != None:
csv_rows.append((
use,
percentage_by_use[use],
average_use_occurance_per_page[use],
percentage_use_w_cookie[use],
percentage_use_ssl[use]
))
else:
csv_rows.append((use,None,None,None,None))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_uses.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_uses.csv',csv_rows)
# generate_use_report
def generate_per_page_network_report(self):
"""
this report generates data necessary for graph/network analysis by
outputting a list of page domains and the requests/owners they connect to
on a per-page basis
"""
print('\t====================================')
print('\t Processing Per-Page Network Report ')
print('\t====================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append(('page_start_url','page_final_url','page_accessed','3p_request_domain','3p_domain_owner','3p_domain_owner_country'))
# process all records
for item in self.analyzer.get_page_to_3p_network():
csv_rows.append((
item['page_start_url'],
item['page_final_url'],
item['page_accessed'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country']
))
self.utilities.write_csv(self.report_path,'per_page_network_report.csv', csv_rows)
# generate_per_page_network_report
def generate_per_site_network_report(self):
"""
this report generates data necessary for graph/network analysis by
outputting a list of page domains and the requests/owners they connect to
aggregated on a per-site basis (eg combining all pages)
"""
print('\t================================')
print('\t Processing Site Network Report ')
print('\t================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append(('page_domain','3p_request_domain','3p_domain_owner','3p_domain_owner_country'))
for item in self.analyzer.get_site_to_3p_network():
csv_rows.append((
item['page_domain'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country']
))
self.utilities.write_csv(self.report_path,'per_site_network_report.csv', csv_rows)
# generate_per_site_network_report
def generate_all_pages_request_dump(self):
"""
Full dump of all requests loaded by all pages across all load times.
Default is 3p only, can be overridden.
"""
print('\t===================================')
print('\t Processing All Pages request Dump ')
print('\t===================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'accessed',
'start_url',
'final_url',
'request_url',
'request_domain',
'domain_owner'
))
# process all records
for item in self.analyzer.get_all_pages_requests():
csv_rows.append((
item['accessed'],
item['start_url'],
item['final_url'],
item['request_url'],
item['request_domain'],
item['request_domain_owner']
))
self.utilities.write_csv(self.report_path,'all_pages_request_dump.csv', csv_rows)
# generate_all_pages_request_dump
def generate_all_pages_cookie_dump(self):
"""
Full dump of all cookies loaded by all pages across all load times.
Default is 1p and 3p, can be overridden to 3p only.
"""
print('\t==================================')
print('\t Processing All Pages Cookie Dump ')
print('\t==================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'accessed',
'start_url',
'final_url',
'cookie_domain',
'cookie_owner',
'cookie_name',
'cookie_value'
))
# process all records
for item in self.analyzer.get_all_pages_cookies():
csv_rows.append((
item['accessed'],
item['start_url'],
item['final_url'],
item['cookie_domain'],
item['cookie_owner'],
item['cookie_name'],
item['cookie_value']
))
self.utilities.write_csv(self.report_path,'all_pages_cookie_dump.csv', csv_rows)
# generate_all_pages_cookie_dump
def generate_site_host_report(self):
"""
First, we update the domain table with the owners
of the various ip addresses which gives us
a mapping of pages to hosts.
Second, we generate a network report for
site domains to hosts.
"""
print('\t=====================')
print('\t Updating Site Hosts ')
print('\t=====================')
self.analyzer.update_site_hosts()
print('\t==============================')
print('\t Generating Site Host Network ')
print('\t==============================')
site_host_data = self.analyzer.get_site_host_network()
if len(site_host_data) == 0:
print('\t\tNo site host data, skipping report.')
return
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'page_domain',
'host_name'
))
for item in site_host_data:
csv_rows.append((
item['site_domain'],
item['host_name']
))
self.utilities.write_csv(self.report_path,'site_hosts-network.csv', csv_rows)
print('\t============================================')
print('\t Generating Aggregate Host Ownership Report ')
print('\t============================================')
owner_occurances = []
for owner, in self.sql_driver.get_ip_owners():
owner_occurances.append(owner)
csv_rows = [('owner','percent_sites_w_owner')]
for item in self.utilities.get_most_common_sorted(owner_occurances):
csv_rows.append((item[0],100*(item[1]/len(owner_occurances))))
self.utilities.write_csv(self.report_path,'site_hosts-aggregated.csv', csv_rows)
# generate_site_host_report
##############
# POLICYXRAY #
##############
def initialize_policy_reports(self):
"""
Run various pre-production steps.
"""
print('\t====================================')
print('\t Updating 3p Domain Disclosure Data ')
print('\t====================================')
#self.analyzer.update_request_disclosure()
self.analyzer.update_crawl_disclosure()
print('\t\t...done!')
print('\t======================================')
print('\t Getting Policy Types List and Counts ')
print('\t======================================')
# pre-populate with 'None' which gives all policies
self.policy_types = [
{
'type' : None,
'count' : self.analyzer.get_policy_count()
}
]
for policy_type, in self.sql_driver.get_available_policy_types():
self.policy_types.append({
'type': policy_type,
'count': self.analyzer.get_policy_count(policy_type=policy_type)
})
print('\t\t...done!')
# initialize_policy_reports
def generate_policy_summary_report(self):
"""
Conducts preliminary analysis steps, determines what types of
policies we have, and then initiates the pertinent reports.
"""
print('\t==================================')
print('\t Generating Policy Summary Report ')
print('\t==================================')
# header row
csv_rows = [('Type','N','Word Count','FK Grade','FRE', '% 3P Disclosed')]
# get results for each policy_type
for policy_type in self.policy_types:
# makes reports clearer than 'None'
if policy_type['type'] == None:
this_policy_type = 'all'
else:
this_policy_type = policy_type['type']
print('\t\tProcessing %s...' % this_policy_type, end='', flush=True)
# fetch results
readability_scores = self.analyzer.get_readability_scores(policy_type=policy_type['type'])
csv_rows.append((
this_policy_type,
policy_type['count'],
self.analyzer.get_average_policy_word_count(policy_type=policy_type['type']),
readability_scores['ave_fkg'],
readability_scores['ave_fre'],
self.analyzer.get_percent_crawl_3p_domains_disclosed(policy_type=policy_type['type'])
))
print('done!')
self.utilities.write_csv(self.report_path,'policy-summary.csv', csv_rows)
# generate_policy_summary_report
def generate_policy_owner_disclosure_reports(self):
"""
Determines what types of policies we have, and then
initiates the pertinent reports.
"""
print('\t======================================')
print('\t Generating Company Disclosure Report ')
print('\t======================================')
# header row
csv_rows = [('Type','N','%% 3P Disclosed')]
print('\t\tProcessing ...', end='', flush=True)
company_results = self.analyzer.get_disclosure_by_request_owner()
csv_rows = [('Domain Owner','Total Occurances','Total Disclosures','Percent Disclosed')]
for item in company_results:
csv_rows.append((item,company_results[item][0],company_results[item][1],round(company_results[item][2],2)))
print('done!')
self.utilities.write_csv(self.report_path,'policy-owner_disclosure.csv',csv_rows)
# generate_policy_owner_disclosure_reports
def generate_policy_gdpr_report(self):
"""
Determine percentage of all policy types
that contain gdpr article 9 terms.
"""
print('\t==============================')
print('\t Generating GDPR Term Report ')
print('\t==============================')
term_list = [
'racial or ethnic origin', 'political opinions',
'religious or philosophical beliefs', 'trade union membership',
'genetic data', 'biometric data',
'data concerning health', 'sex life',
'sexual orientation'
]
self.generate_terms_report('policy-gdpr_terms.csv',term_list)
# generate_policy_gdpr_report
def generate_policy_pacification_report(self):
"""
Determine percentage of all policy types
that contain pacification terms.
"""
print('\t=====================================')
print('\t Generating Pacification Term Report ')
print('\t=====================================')
term_list = ['we value', 'we respect', 'important to us', 'help you', 'we care', 'committed to protecting', 'cares about', 'transparency']
self.generate_terms_report('policy-pacification_terms.csv',term_list)
# generate_policy_pacification_report
def generate_policy_pii_report(self):
"""
Determine percentage of all policy types
that contain PII terms.
"""
print('\t============================')
print('\t Generating PII Term Report ')
print('\t============================')
term_list = ['ip address','internet protocol address', 'browser type', 'operating system']
self.generate_terms_report('policy-pii_terms.csv',term_list)
# generate_policy_pii_report
def generate_terms_report(self,report_name,term_list):
"""
Generic function to generate reports on how
often terms appear in policies.
"""
# set up header row
csv_rows = []
header_row = ('Type','any term')
for term in term_list:
header_row = header_row + (term,)
csv_rows.append(header_row)
# get results for each policy_type
for policy_type in self.policy_types:
# makes reports clearer than 'None'
if policy_type['type'] == None:
this_policy_type = 'all'
else:
this_policy_type = policy_type['type']
print('\t\tProcessing %s...' % this_policy_type, end='', flush=True)
this_csv_row = (this_policy_type,)
this_csv_row = this_csv_row + (self.analyzer.get_terms_percentage(term_list,policy_type=policy_type['type'],policy_type_count=policy_type['count']),)
for term in term_list:
this_csv_row = this_csv_row + (self.analyzer.get_terms_percentage([term],policy_type=policy_type['type'],policy_type_count=policy_type['count']),)
csv_rows.append(this_csv_row)
print('done!')
self.utilities.write_csv(self.report_path,report_name,csv_rows)
# generate_terms_report
# Reporter
|
thezedwards/webXray
|
webxray/Reporter.py
|
Reporter.py
|
py
| 30,709 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "webxray.Utilities.Utilities",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "webxray.Analyzer.Analyzer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "webxray.SQLiteDriver.SQLiteDriver",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "webxray.PostgreSQLDriver.PostgreSQLDriver",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 384,
"usage_type": "call"
}
] |
1741943302
|
import typing
from typing import Optional, Tuple, Any, Type, Dict
import numpy as np
from .mode import Q
from piquasso.core import _mixins
from piquasso.api.exceptions import PiquassoException, InvalidProgram
if typing.TYPE_CHECKING:
from piquasso.api.program import Program
class Instruction(_mixins.DictMixin, _mixins.RegisterMixin, _mixins.CodeMixin):
"""
Base class for all instructions.
Args:
params: Mapping of parameters specified by the users.
extra_params: Mapping of extra parameters, typically calculated ones.
"""
NUMBER_OF_MODES: Optional[int] = None
_subclasses: Dict[str, Type["Instruction"]] = {}
def __init__(
self, *, params: Optional[dict] = None, extra_params: Optional[dict] = None
) -> None:
self._params: dict = params or dict()
self._extra_params: dict = extra_params or dict()
@property
def params(self) -> dict:
return self._params
@property
def _all_params(self) -> dict:
return {**self._params, **self._extra_params}
@property
def modes(self) -> Tuple[int, ...]:
return getattr(self, "_modes", tuple())
@modes.setter
def modes(self, value: Tuple[int, ...]) -> None:
self._validate_modes(value)
self._modes = value
def _as_code(self) -> str:
if hasattr(self, "modes"):
mode_string = ", ".join([str(mode) for mode in self.modes])
else:
mode_string = ""
if hasattr(self, "params"):
params_string = "{}".format(
", ".join(
[
f"{key}={self._param_repr(value)}"
for key, value in self.params.items()
]
)
)
else:
params_string = ""
return f"pq.Q({mode_string}) | pq.{self.__class__.__name__}({params_string})"
@staticmethod
def _param_repr(value: Any) -> str:
if isinstance(value, np.ndarray):
return "np." + repr(value)
return value
def on_modes(self, *modes: int) -> "Instruction":
if modes != tuple():
self.modes: Tuple[int, ...] = modes
return self
def _apply_to_program_on_register(self, program: "Program", register: Q) -> None:
program.instructions.append(self.on_modes(*register.modes))
@classmethod
def from_dict(cls, dict_: dict) -> "Instruction":
"""Creates an :class:`Instruction` instance from a dict specified.
Args:
dict_ (dict):
The desired :class:`Instruction` instance in the format of a `dict`.
Returns:
Instruction:
An :class:`Instruction` initialized using the specified `dict`.
"""
class_ = cls.get_subclass(dict_["type"])
instruction = class_(**dict_["attributes"]["constructor_kwargs"])
instruction.modes = dict_["attributes"]["modes"]
return instruction
@classmethod
def set_subclass(cls, instruction: Type["Instruction"]) -> None:
"""Registers a class in the instruction subclass map.
This is meaningful in contexts when one has multiple instructions with the same
name.
Example:
When one creates a custom beamsplitter with name `Beamsplitter` and
subclasses :class:`~piquasso.instructions.gates.Beamsplitter`, then for e.g.
executing a Blackbird code will be performed with the custom one, not the
original one. When one wants to use the original one in this case, one can
reset it with this method.
Args:
instruction (Type[Instruction]): The instruction class to be registered.
Raises:
PiquassoException:
When the class is not actually a subclass of :class:`Instruction`.
"""
if not issubclass(instruction, Instruction):
raise PiquassoException(
f"The instruction '{instruction}' needs to be a subclass of "
"'pq.Instruction'."
)
cls._subclasses[instruction.__name__] = instruction
@classmethod
def get_subclass(cls, name: str) -> Type["Instruction"]:
"""Returns the instruction subclass specified by its name.
Returns:
Type[Instruction]: The instruction class.
"""
return cls._subclasses[name]
def __repr__(self) -> str:
if hasattr(self, "modes"):
modes = "modes={}".format(self.modes)
else:
modes = ""
if getattr(self, "params") != {}:
params = "{}, ".format(
", ".join([f"{key}={value}" for key, value in self.params.items()])
)
else:
params = ""
classname = self.__class__.__name__
return f"<pq.{classname}({params}{modes})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, Instruction):
return False
return self.modes == other.modes and self.params == other.params
def __init_subclass__(cls) -> None:
super().__init_subclass__()
cls.set_subclass(cls)
def _validate_modes(self, modes):
if self.NUMBER_OF_MODES is not None and len(modes) != self.NUMBER_OF_MODES:
raise InvalidProgram(
f"The modes '{modes}' got specifed for the instruction '{self}', but "
f"exactly '{self.NUMBER_OF_MODES}' mode needs to be specified. "
f"Concretely, the total number of modes specified for this instruction "
f"is 'len(modes) == len({modes}) == {len(modes)} != "
f"{self.NUMBER_OF_MODES}'."
)
class Preparation(Instruction):
"""Base class for preparations."""
class Gate(Instruction):
"""Base class for gates."""
class Measurement(Instruction):
r"""Base class for measurements."""
|
Budapest-Quantum-Computing-Group/piquasso
|
piquasso/api/instruction.py
|
instruction.py
|
py
| 5,969 |
python
|
en
|
code
| 19 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins.DictMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "piquasso.core._mixins.RegisterMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins.CodeMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "mode.Q",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "piquasso.api.exceptions.PiquassoException",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "piquasso.api.exceptions.InvalidProgram",
"line_number": 177,
"usage_type": "call"
}
] |
36246067579
|
from flask import Flask, request, render_template
from flask_cors import CORS
from waitress import serve
def response(code, message, data=None):
# code=0 for success, code=1 for fail
return {'code': code, 'message': message, 'data': data}
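# e.g. an illustrative call: response(0, 'success', {'answer': 42}) returns
#   {'code': 0, 'message': 'success', 'data': {'answer': 42}}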
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
# I changed the jinja expression delimiter from {{...}} to %%...%%
# because it conflicts with the Vue template syntax {{}}
variable_start_string='%%',
variable_end_string='%%',
))
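# Illustrative template usage under the delimiters above (names hypothetical):
#   <p>%% server_message %%</p>     <!-- rendered by Flask/Jinja -->
#   <p>{{ clientMessage }}</p>      <!-- left for Vue to render in the browser -->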
app = CustomFlask(__name__)
app.config['JSON_SORT_KEYS'] = False
app.config['TEMPLATES_AUTO_RELOAD'] = True
CORS(app)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/get-info-from-backend', methods=['POST'])
def direction_control():
data = request.get_json()
text = data['text']
resData = text + ' ,I got you back'
return response(0, 'success', resData)
def main():
# for develop mode
app.run(host='0.0.0.0', port=8080, debug=True)
# for production mode
# serve(app,host="0.0.0.0",port=8080)
if __name__ == "__main__":
main()
|
nofear195/flask-vue-project-template
|
main.py
|
main.py
|
py
| 1,179 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.Flask.jinja_options.copy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.Flask.jinja_options",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask_cors.CORS",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 35,
"usage_type": "name"
}
] |
30728272330
|
import fileinput,re
from collections import defaultdict
def signum(x): return 1 if x > 0 else (0 if x == 0 else -1)
p1, p2 = defaultdict(lambda: 0), defaultdict(lambda: 0)
ll = [l.strip() for l in fileinput.input()]
for l in ll:
x1, y1, x2, y2 = map(int, re.findall("\d+", l))
xx, yy = signum(x2 - x1), signum(y2 - y1)
for i in range(max(abs(x2-x1), abs(y2-y1)) + 1):
x, y = x1 + i*xx, y1 + i*yy
if x1 == x2 or y1 == y2:
p1[(x, y)] += 1
p2[(x, y)] += 1
print(len([x for x in p1 if p1[x] >= 2]),
len([x for x in p2 if p2[x] >= 2]))
|
mdaw323/alg
|
adventofcode2021/5.py
|
5.py
|
py
| 592 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "fileinput.input",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 12,
"usage_type": "call"
}
] |
72999750909
|
import re
import json
import requests
from bs4 import BeautifulSoup
from lxml import etree
from pyquery import PyQuery as pq
from Alion_Crawl.CRAW_FUNCTION.request import *
headers = {
'User-Agent':'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36'
}
def douban_movie(url):
html = requests_text(url)
soup = BeautifulSoup(html, "lxml")
content = soup.find('div', class_='article')
images = content.find_all('img')
picture_name_list = [image['alt'] for image in images]
#picture_link_list = [image['src'] for image in images]
#urllib.request.urlretrieve(picture_link, '/home/lin/img/douban_books/%s.jpg' % picture_name)
return picture_name_list
def news(url):
html = requests_text(url)
doc = pq(html)
news = doc(".newsList ul li a").items()
#print(news)
news_list = [new.text() for new in news]
#news_urls_list = [new.attr.href for new in news]
#print(news_urls_list)
return news_list
def jingdong_comment(url):
comments = []
html = requests_text(url)
    data = html.split('(', 1)[1]  # strip the JSONP wrapper so the remainder parses as valid JSON
data = data[0:len(data) - 2]
data_json = json.loads(str(data))['comments']
for i in list(range(len(data_json))):
comment = data_json[i]['content']
comments.append(comment)
return comments
def maoyan_movie(url):
movie_list = []
html = requests_text(url)
pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?src="(.*?)".*?name"><a'
+ '.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
+ '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
items = re.findall(pattern, html)
for item in items:
movie_dict = {}
movie_dict['index'] = item[0]
movie_dict['image'] = item[1]
movie_dict['title'] = item[2]
movie_dict['actor'] = item[3].strip()[3:]
movie_dict['times'] = item[4].strip()[5:]
movie_dict['score'] = item[5] + item[6]
movie_list.append(movie_dict)
return movie_list
def picture(url):
pic_list = []
res = requests_json(url)
if res.get('data'):
for item in res.get('data'):
title = item.get('title')
pic_list.append(title)
return pic_list
def novel_download(url):
novel_list = []
res = requests_text(url)
#print(res)
soup = BeautifulSoup(res, "lxml")
info = soup.find('div', class_='wrapper_main')
title = info.find('div',class_='h1title').h1.text.strip()[2:]
    novel_list.append(title)
    return novel_list
def sunan_job(url):
job_list = []
res = requests_text(url)
infos = etree.HTML(res).xpath('//div[@class="dw_table"]/div[@class="el"]')
for info in infos:
try:
items = {}
items['posttime'] = info.xpath('.//span[@class="t5"]/text()')[0].strip()
items['salary'] = info.xpath('.//span[@class="t4"]/text()')[0].strip()
items['company'] = info.xpath('.//span[@class="t2"]/a[@target="_blank"]/text()')[0]
items['workingplace'] = info.xpath('.//span[@class="t3"]/text()')[0][0:2].strip()
items['jobname'] = info.xpath('.//p[@class="t1 "]/span/a[@target="_blank"]/text()')[0].strip()
items['info_id'] = info.xpath('.//p[@class="t1 "]/span/a/@href')[0].strip()
job_list.append(items)
except :
continue
return job_list
# def douyin(url):
###
# res_list = []
# res = requests.get(url, headers=headers)
# # print(res.content)
# path = "./img/job_anal/" + url[-20:-8] + '.mp4'
# with open(path, "wb") as file:
# file.write(res.content)
# info = str(url) + ".mp4 download finished"
# res_list.append(info)
# return res_list
#qq
|
if-always/Alion-Crawl
|
CRAW_FUNCTION/all_functions.py
|
all_functions.py
|
py
| 3,822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyquery.PyQuery",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 88,
"usage_type": "name"
}
] |
27189531533
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 23:16:08 2018
@author: Chat
"""
import pip
def install():
pip.main(['install', 'beautifulsoup4'])
pip.main(['install', 'weather-api'])
pip.main(['install', 'urllib3'])
import datetime
from urllib.request import urlopen
from bs4 import BeautifulSoup
from weather import Weather, Unit
import superSecret as s
import send_email_file
def fetch_weather(your_location):
global date, condition, now
now = datetime.datetime.now()
date = [now.month, now.day, now.year]
weather = Weather(unit=Unit.FAHRENHEIT)
location = weather.lookup_by_location(your_location)
    forecasts = location.forecast()
i = []
for x in forecasts:
i.append(vars(x))
    #Fetches today's weather, then stores the high and low in a dictionary with the date as the key
todays_forecast = {}
todays_forecast[i[0]['_forecast_data']['date']] = [i[0]['_forecast_data']['high'], i[0]['_forecast_data']['low']]
return todays_forecast
def shorts():
web_page = 'http://caniwearshorts.today/?location=Auburn%2C+AL'
page = urlopen(web_page)
soup = BeautifulSoup(page, 'html.parser')
shorts_span = soup.find('h1', attrs={'style': 'font-size: 70px'})
shorts = shorts_span.text
return shorts
def compose(carrier, phone, your_location):
if carrier == "Verizon":
to = phone + "@vtext.com"
elif carrier == "Sprint":
to = phone + "@messaging.sprintpcs.com"
elif carrier == "AT&T":
to = phone + "@txt.att.net"
elif carrier == "T-Mobile":
to = phone + "@tmomail.net"
else:
return("Invalid Carrier!!!")
    todays_weather = fetch_weather(your_location)
msg = message(date_str, todays_weather, your_location, key_date)
mail = send_email_file.sendemail(s.username, to, "", "Good Morning - Weather Bot", msg, s.username, s.password)
return mail
def message(date_str, todays_weather, your_location, key_date):
message = date_str + "\r\r" + conditionText(your_location) + "\r\r" + "Should I wear shorts?: " + str(shorts()) + "\r" + "Low: " + str(todays_weather[key_date][1]) + "\r" + "High: " + str(todays_weather[key_date][0])
return(message)
def conditionText(your_location):
weather = Weather(unit=Unit.FAHRENHEIT)
location = weather.lookup_by_location(your_location)
condition = location.condition()
if condition.text() == "Scattered Thunderstorms":
condition_text = "It might be a good idea to bring an umbrella if you're going out."
elif condition.text() == "Thunderstorms":
condition_text = "You should definatly bring an umbrella out with you today."
elif condition.text() == "Sunny":
condition_text = "No rain for today! Enjoy the Sun."
elif condition.text() == 'Mostly Cloudy':
condition_text = "There will be dark skys but no rain in the forecast!"
elif condition.text() == 'Breezy':
condition_text = "There will be lots of wind. Don't get blown over!"
elif condition.text() == 'Clear':
condition_text = "Its clear out today!"
else:
condition_text = condition.text()
return condition_text
now = datetime.datetime.now()
date = [now.month, now.day, now.year]
date_str = str(date[0]) +"/"+ str(date[1]) +"/"+ str(date[2])
key_date = str(date[1]) + " " + now.strftime("%b") + " " + str(date[2])
|
jcsumlin/weather-bot
|
weather_pjt.py
|
weather_pjt.py
|
py
| 3,467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pip.main",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pip.main",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pip.main",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "weather.Weather",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "weather.Unit.FAHRENHEIT",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "weather.Unit",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "weather.lookup_by_location",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "send_email_file.sendemail",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "superSecret.username",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "superSecret.password",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "weather.Weather",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "weather.Unit.FAHRENHEIT",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "weather.Unit",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "weather.lookup_by_location",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 91,
"usage_type": "attribute"
}
] |
36647090017
|
import pickle
import os
import sys
import pprint
import tempfile
import warnings
import gridfs
from pymongo import MongoClient
from bson import ObjectId
from datetime import datetime
import torch
from sacred import Experiment
from sacred.observers import MongoObserver
def add_mongodb(ex: Experiment):
uri = get_mongo_uri()
print("Using MongoDB observer: " + uri)
ex.observers.append(MongoObserver.create(uri))
def add_package_sources(ex: Experiment):
this_dir = os.path.dirname(__file__)
package_dirs = [this_dir, os.path.join(this_dir, "..")]
for package_dir in package_dirs:
for name in os.listdir(package_dir):
if name.endswith(".py"):
ex.add_source_file(os.path.abspath(os.path.join(package_dir, name)))
def load_weights(model, artifact_filename, mode='eval'):
# pytorch needs a "real" (fileno) file
with open(artifact_filename, mode='rb') as f:
model.load_state_dict(torch.load(f))
if mode == 'eval':
model.eval()
else:
model.train()
model.cuda()
def get_db(database_name='sacred'):
warnings.warn("This code needs some cleanup. Tell me before you want to use it.")
client = MongoClient(host=get_mongo_uri())
db = client.get_database(database_name)
return db, gridfs.GridFS(db)
def get_mongo_uri(config_dir="~/.config/docker_ports/"):
fill_in_url = "mongodb://mlproject_fill_in_mongodb"
uri = os.environ.get("MONGODB_URI", fill_in_url)
if uri == fill_in_url:
config_dir = os.path.expanduser(config_dir)
filename = os.path.join(config_dir, "docker_mongodb_ip")
with open(filename) as f:
return f.read()
else:
return uri
def get_id(find, sort, database_name='sacred'):
print(find)
print(sort)
db, _ = get_db(database_name)
result = db.runs.find_one(find, projection={'_id': 1}, sort=sort)
return result['_id']
def load_entry(id, database_name='sacred'):
db, _ = get_db(database_name)
run_entry = db.runs.find_one({'_id': id})
return run_entry
def load_experiment(experiment, id, database_name='sacred'):
run_entry = load_entry(id)
return experiment._create_run(config_updates=run_entry['config']), run_entry
def weight_files(db_entry):
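    # collect artifacts whose name ends in 'weight', keyed by the training iteration parsed from the filename, sorted by iteration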
weights = []
for artifact in db_entry['artifacts']:
name = artifact['name']
if name.endswith('weight'):
try:
iteration = int(name.split('.')[0].split('_')[-1])
except ValueError:
iteration = None
weights.append((iteration, artifact))
return sorted(weights, key=lambda w: w[0] or 0)
def load_model(db_entry):
raise Exception()
if type(db_entry) == int:
db_entry = load_entry(db_entry)
# TODO: fix this
#
iteration, latest_weight = weight_files(db_entry)[-1]
load_weights_from_db(model, latest_weight['file_id'])
return model
def load_weights_from_db(model, file_id=None, db_entry=None, database_name='sacred'):
if file_id is None:
iteration, latest_weight = weight_files(db_entry)[-1]
file_id = latest_weight['file_id']
_, fs = get_db(database_name)
f = fs.get(file_id)
with tempfile.NamedTemporaryFile() as tmpf:
tmpf.write(f.read())
tmpf.flush()
load_weights(model, tmpf.name)
f.close()
def print_experiment(ex, stream=None, skip=['captured_out', 'artifacts', 'results']):
if stream is None:
stream = sys.stdout
pp = pprint.PrettyPrinter(indent=2, stream=stream)
for k, v in sorted(ex.items()):
if k in skip:
continue
stream.write('\n')
stream.write("-" * 40 + " " + k + " " + "-" * 40 + "\n")
pp.pprint(v)
def get_metric(ex, name, db=None):
if db is None:
db, _ = get_db()
for metric in ex['info']['metrics']:
if metric['name'] == name:
return db.metrics.find_one({'_id': ObjectId(metric['id'])})
raise KeyError("No metric named {} found".format(name))
def yield_metrics(ex, marker=None, db=None):
if db is None:
db, _ = get_db()
if 'info' not in ex or 'metrics' not in ex['info']:
return
for metric in sorted(ex['info']['metrics'], key=lambda m: m['name']):
print(metric)
if marker is not None and marker not in metric['name']:
continue
yield db.metrics.find_one({'_id': ObjectId(metric['id'])})
class ResultStorage:
def __init__(self, run_id, iteration):
self.db, self.gridfs = get_db()
self.run_id = run_id
self.iteration = iteration
# self.db.runs.find_one_and_update(
# {'_id': self.run_id},
# {'results': {str(self.iteration): []}}
# )
def gridfs_filename(self, name):
return 'results://{}/{}/{}'.format(self.run_id, self.iteration, name)
@property
def iteration_key(self):
return 'results.{}'.format(self.iteration)
def result_key(self, name):
return '{}.{}'.format(self.iteration_key, name)
def get_result(self, name):
entry = self.db.runs.find_one(
{'_id': self.run_id},
{self.result_key(name): 1}
)
if ('results' in entry and str(self.iteration) in entry['results']
and name in entry['results'][str(self.iteration)]):
return entry['results'][str(self.iteration)][name]
def store(self, name, value):
self.invalidate(name)
f = self.gridfs.new_file(filename=self.gridfs_filename(name))
f.write(pickle.dumps(value))
f.close()
self.db.runs.update_one(
{'_id': self.run_id},
{
'$set': {self.result_key(name): {
'file_id': f._id,
'create_at': datetime.utcnow(),
'name': name
}},
}
)
def get(self, name):
result = self.get_result(name)
if result is None:
raise KeyError("No result under {}".format(name))
grid_file = self.gridfs.get(result['file_id'])
return pickle.loads(grid_file.read())
def cache(self, name, lazy_value):
try:
return self.get(name)
except KeyError:
value = lazy_value()
self.store(name, value)
return value
def invalidate(self, name):
result = self.get_result(name)
self.db.runs.update_one({'_id': self.run_id},
{'$unset': {self.result_key(name): {}}})
if result is not None:
self.gridfs.delete(result['file_id'])
def all_results(self):
entry = self.db.runs.find_one({'_id': self.run_id},
{'results.{}'.format(self.iteration): 1})
return entry['results'][str(self.iteration)]
def invalidate_all(self):
for name, result in self.all_results().items():
self.gridfs.delete(result['file_id'])
self.db.runs.update_one({'_id': self.run_id},
{'$unset': {self.iteration_key: {}}})
prefix_regex = "^{}.*".format(self.gridfs_filename(''))
for entry in self.db.fs.files.find({'filename': {'$regex': prefix_regex}}):
warnings.warn("Deleting orphan result: " + str(entry))
self.gridfs.delete(entry['_id'])
|
berleon/mlproject
|
mlproject/db.py
|
db.py
|
py
| 7,422 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sacred.Experiment",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sacred.observers.MongoObserver.create",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sacred.observers.MongoObserver",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sacred.Experiment",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "gridfs.GridFS",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "bson.ObjectId",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "bson.ObjectId",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "pickle.dumps",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "pickle.loads",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 238,
"usage_type": "call"
}
] |
10693679498
|
from typing import Union
def pizza_before_bh(loop: int) -> str:
result: str = ''
for _ in range(loop):
n_date: str
d_people: Union[str, list]
[n_date, d_people] = input().split(' ', 1)
d_people = list(map(int, d_people.split()))
if len(result) == 0:
if all(i == 1 for i in d_people):
result = n_date
return result if result else 'Pizza antes de FdI'
def main() -> None:
while True:
try:
values_input: list[str] = input().split()
d: int = int(values_input[1])
print(pizza_before_bh(d))
except EOFError:
break
if __name__ == '__main__':
main()
|
pdaambrosio/python_uri
|
Beginner/uri2554.py
|
uri2554.py
|
py
| 709 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
}
] |
72151456828
|
from pynput import keyboard
import random
print("Please choose Rock, Paper or Scissors by clicking 1, 2 or 3 respectively. To exit the game click escape.")
def on_press(key):
if key == keyboard.KeyCode(char='1'):
userMove = 1
rps(userMove)
elif key == keyboard.KeyCode(char='2'):
userMove = 2
rps(userMove)
elif key == keyboard.KeyCode(char='3'):
userMove = 3
rps(userMove)
else:
        if key == keyboard.Key.esc:
pass
else:
print("Please click either 1, 2 or 3.")
def on_release(key):
if key == keyboard.Key.esc:
return False
def rps(userMove):
computerMove = random.randint(1, 3)
if userMove == computerMove:
print("Tie game, Play again or exit.")
    elif {userMove, computerMove} == {1, 3}:  # rock vs scissors
if userMove == 1:
print("You win! Play again or exit.")
else:
print("You lose! Play again or exit.")
    elif {userMove, computerMove} == {3, 2}:  # scissors vs paper
if userMove == 3:
print("You win! Play again or exit.")
else:
print("You lose! Play again or exit.")
    elif {userMove, computerMove} == {2, 1}:  # paper vs rock
if userMove == 2:
print("You win! Play again or exit.")
else:
print("You lose! Play again or exit.")
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
|
fraserreilly/rockPaperScissors
|
rockPaperScissors.py
|
rockPaperScissors.py
|
py
| 1,585 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pynput.keyboard.KeyCode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.KeyCode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.KeyCode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Listener",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard",
"line_number": 50,
"usage_type": "name"
}
] |
42969813970
|
import cv2
import mediapipe as mp
import numpy as np
current_image = 'test1.png'
mp_drawing = mp.solutions.drawing_utils
mp_selfie_segmentation = mp.solutions.selfie_segmentation
BG_COLOR = (255, 255, 255)
with mp_selfie_segmentation.SelfieSegmentation(
model_selection=1) as selfie_segmentation:
image = cv2.imread(current_image)
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = selfie_segmentation.process(image)
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
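    # build a 3-channel boolean mask: True wherever the segmentation confidence exceeds 0.1 (foreground person pixels)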
condition = np.stack(
(results.segmentation_mask,) * 3, axis=-1) > 0.1
background = np.zeros(image.shape, dtype=np.uint8)
background[:] = BG_COLOR
output_image = np.where(condition, image, background)
cv2.imshow('MediaPipe Selfie Segmentation', output_image)
cv2.waitKey(0)
|
Pwegrzyn32/image-background-blur
|
blur2_0.py
|
blur2_0.py
|
py
| 898 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mediapipe.solutions",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.stack",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 29,
"usage_type": "call"
}
] |
7714160246
|
import sys
import os
import shutil
import string
import re
import argparse
from datetime import datetime, timedelta
import urllib3
import requests
import json
import yfinance as yf
verbose = False
ameritradeids = []
def ascii(text):
return text.decode('ascii', 'ignore')
class Stock:
name = ''
date = 0
iprice = 0.0
cost = 0.0
value = 0.0
price = 0.0
quantity = 0.0
ownedfor = 0.0
transfer = False
def __init__(self, sym, d, ip):
self.name = sym
self.date = d
self.ownedfor = float((datetime.now()-d).days) / 365.25
self.iprice = ip
def stockPriceWebScrape(self, symbol):
url = 'https://finance.yahoo.com/quote/'+symbol
http = urllib3.PoolManager()
response = http.request('GET', url)
html = ascii(response.data)
ret = find_in_html(html, '"USD","regularMarketPrice":{"raw":', ',')
return float(ret)
def stockPriceYFinance(self, symbol):
dt = (datetime.now() - timedelta(days=4)).strftime('%Y-%m-%d')
t = yf.download(symbol, start=dt)
return float('%.2f' % t['Close'][-1])
def stockPriceAlphaAdvantage(self, symbol):
sym = 'symbol=%s' % symbol
apikey = 'apikey=XXXXXXXXXXX'
url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&'+sym+'&'+apikey
resp = requests.get(url)
data = json.loads(resp.content)
return float(data['Global Quote']['05. price'])
def getPrice(self):
global verbose
if verbose:
print("PRICE GET: %s" % (self.name))
if self.name != 'Cash':
self.price = self.stockPriceYFinance(self.name)
else:
self.price = 1.0
print("PRICE: %s $%.2f" % (self.name, self.price))
class Portfolio:
totalcash = 0.0
networth = 0.0
stocklist = dict()
buylist = dict()
def stockTransaction(self, t):
if not t.symbol:
return
if t.symbol not in self.stocklist:
self.stocklist[t.symbol] = Stock(t.symbol, t.date, t.price)
stock = self.stocklist[t.symbol]
if t.action == 'Deposit':
stock.quantity += t.amount
self.totalcash += t.amount
return
if 'SHARE CLASS CONVERSION' in t.action:
del self.stocklist[t.symbol]
m = re.match('SHARE CLASS CONVERSION \((?P<n>[A-Z]*)\)', t.action)
if not m:
return
t.symbol = m.group('n')
stock.name = m.group('n')
stock.quantity = t.quantity
self.stocklist[t.symbol] = stock
return
if t.action == 'Split':
stock.iprice *= stock.quantity / (t.quantity + stock.quantity)
if t.action in ['Dividend Reinvest', 'Split', 'Buy', 'Sell']:
if t.action == 'Sell' and t.quantity >= 0:
t.quantity *= -1
stock.quantity += t.quantity
stock.quantity = 0 if stock.quantity < 0.0000001 else stock.quantity
act = 'Div' if t.action == 'Dividend Reinvest' else t.action
if t.action in ['Buy', 'Sell']:
if 'Cash' in self.stocklist:
self.stocklist['Cash'].quantity += t.amount
stock.value += t.amount
price = '%6.2f' % t.price
cost = '%12.5f' % (t.quantity*t.price)
else:
cost = price = ''
if t.action == 'Buy':
stock.cost -= t.amount
if t.symbol not in self.buylist:
self.buylist[t.symbol] = []
self.buylist[t.symbol].append({
'type': act,
'date': t.date.date(),
'quantity': t.quantity,
'price': price,
'cost': cost,
'tq': stock.quantity,
'tv': stock.value,
'tc': stock.cost,
})
if t.action == 'Transfer':
if not stock.transfer:
stock.quantity = t.quantity
else:
stock.quantity += t.quantity
stock.transfer = True
def getStockPrices(self):
for s in self.stocklist:
stock = self.stocklist[s]
if stock.quantity == 0.0:
continue
stock.getPrice()
def showPurchases(self, funds=[]):
print('')
print(' PURCHASES')
print('-------------------------------------------------------------------------')
print(' NAME DATE TYPE QUANTITY TotalQTY TotalCOST COST PRICE')
print('-------------------------------------------------------------------------')
for name in sorted(self.buylist):
if len(funds) > 0 and name not in funds:
continue
list = self.buylist[name]
for i in list:
print(' %-5s %s %5s %9.3f %9.3f %9.2f %12s %6s' % \
(name, i['date'], i['type'], i['quantity'], i['tq'],
i['tc'], i['cost'], i['price']))
def show(self):
print('')
print(' COMPLETED TRANSACTIONS')
print('---------------------------------------------')
print(' NAME QTY COST PROFIT RETURN')
print('---------------------------------------------')
cost = value = 0.0
for s in self.stocklist:
stock = self.stocklist[s]
if s == 'Cash' or stock.quantity > 0.0 or stock.cost == 0.0:
continue
print("%5s %8.3f %10s %10s %6.2f%%" % \
(s, stock.quantity,
'$%.2f'%stock.cost,
'$%.2f'%stock.value,
100.0*stock.value/stock.cost
))
value += stock.value
cost += stock.cost
print('---------------------------------------------')
print("TOTAL %10s %10s %6.2f%%" % \
('$%.2f'%cost, '$%.2f'%value, 100.0*value/cost))
print('')
print(' CURRENT INVESTMENTS')
div = '-----------------------------------------------------------------------------------------------------'
print(div)
print(' NAME PDATE AGE QTY PRICE COST VALUE PROFIT RETURN AVGRET CHANGE')
print(div)
profit = cost = value = 0.0
for s in sorted(self.stocklist):
stock = self.stocklist[s]
if stock.quantity == 0.0:
continue
v = stock.quantity * stock.price
if stock.cost > 0:
ret = 100.0*((v/stock.cost)-1)
p = v - stock.cost
else:
ret = p = 0
avgret = ret/stock.ownedfor
if stock.iprice > 0:
change = 100.0*((stock.price/stock.iprice)-1)
else:
change = 0
print("%5s %10s %9s %9.3f %7s %10s %10s %10s %6.2f%% %6.2f%% %6.2f%%" % \
(s, stock.date.date(),
'%.2f yrs' % stock.ownedfor,
stock.quantity,
'$%.2f' % stock.price,
'$%.2f' % stock.cost,
'$%.2f' % v,
'$%.2f' % p,
ret,
avgret,
change,
))
value += v
profit += p
cost += stock.cost
if self.totalcash > 0:
cost = self.totalcash
profit = value - cost
print(div)
print("TOTAL %10s %10s %10s %6.2f%%" % \
('$%.2f'%cost, '$%.2f'%value, '$%.2f'%profit, 100.0*((value/cost)-1)))
print('')
portfolio.networth += value
portfolio = Portfolio()
class Transaction:
first = True
constants = {
'scottrade': {
'fields': [
'Symbol', 'Quantity', 'Price', 'Action', 'TradeDate', 'SettledDate',
'Interest', 'Amount', 'Commission', 'Fees', 'ID', 'Description',
'ActionId', 'TradeNumber', 'RecordType', 'TaxLotNumber'
],
},
'ameritrade': {
'fields': [
'TradeDate', 'ID', 'Action', 'Quantity', 'Symbol', 'Price',
'Commission', 'Amount', 'NetCashBalance', 'Fees', 'ShortTermFee',
'RedemptionFee', 'SalesCharge'
]
},
'schwab': {
'fields': [
'TradeDate', 'Action', 'Symbol', 'Desc', 'Quantity', 'Price',
'Commission', 'Amount'
]
}
}
data = []
symbol = ''
quantity = 0.0
price = 0.0
id = ''
action = ''
date = 0
amount = 0.0
comm = 0.0
fees = 0.0
def __init__(self, broker, line, count):
self.rawline = line.strip()
self.fields = self.constants[broker]['fields']
line = line.replace('\r\n', '')
self.data = line.split(',')
self.symbol = self.val('Symbol').replace('.', '')
if broker == 'schwab' and not self.symbol:
m = re.match('.*\((?P<n>[A-Z]*)\)', self.val('Desc'))
if m:
self.symbol = m.group('n')
self.quantity = self.val('Quantity', True)
self.price = self.val('Price', True)
self.action = self.val('Action')
self.id = self.val('ID')
ds = self.val('TradeDate')
m = re.match('(?P<m>[0-9]*)/(?P<d>[0-9]*)/(?P<y>[0-9]*)', ds)
if not m:
doError('Bad date format %s' % ds)
self.date = datetime(int(m.group('y')), int(m.group('m')),
int(m.group('d')), 0, 0, 0, 999999-count)
self.amount = self.val('Amount', True)
if self.quantity and self.amount and not self.price:
self.price = abs(self.amount / self.quantity)
self.fees = self.val('Fees', True)
self.comm = self.val('Commission', True)
if 'ORDINARY DIVIDEND' in self.action or \
'LONG TERM GAIN DISTRIBUTION' in self.action or \
'SHORT TERM CAPITAL GAINS' in self.action or \
'Cash Dividend' in self.action or \
'Long Term Cap Gain' in self.action or \
'Reinvest Dividend' in self.action:
self.action = 'Dividend Reinvest'
elif 'STOCK SPLIT' in self.action and self.quantity > 0:
self.action = 'Split'
elif 'TRANSFER OF SECURITY' in self.action and self.quantity > 0:
self.action = 'Transfer'
elif ('Bought' in self.action or 'Reinvest Shares' in self.action) \
and self.quantity > 0:
self.action = 'Buy'
# hack to represent a correction, reverses a previous buy
if self.amount > 0:
self.quantity *= -1
elif 'Sold' in self.action and self.quantity > 0:
self.action = 'Sell'
elif 'ELECTRONIC FUNDING RECEIPT' in self.action or \
'Cash Adjustment' in self.action:
self.symbol = 'Cash'
self.action = 'Deposit'
def val(self, name, num=False):
res = 0 if num else ''
if name not in self.fields:
return res
i = self.fields.index(name)
if not num:
return self.data[i]
if self.data[i]:
return float(self.data[i])
return 0
def show(self):
if self.action not in ['Dividend Reinvest', 'Buy', 'Sell', 'Split', 'Transfer', 'Deposit']:
return
if Transaction.first:
print('----------------------------------------------------------------------------------------')
print(' date name action qty price amount comm')
print('----------------------------------------------------------------------------------------')
Transaction.first = False
print('%s %5s %35s %8.3f %7s %10s %7s' % \
(self.date.strftime('%m/%d/%y'), self.symbol, self.action, self.quantity,
'$%.2f' % (self.price),
'$%.2f' % (self.amount),
'$%.2f' % (self.comm)
))
def find_in_html(html, start, end, firstonly=True):
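    # scan html for text between the start and end regex patterns; return the first match, or all matches when firstonly is False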
n, out = 0, []
while n < len(html):
m = re.search(start, html[n:])
if not m:
break
i = m.end()
m = re.search(end, html[n+i:])
if not m:
break
j = m.start()
str = html[n+i:n+i+j]
if end == 'ms':
num = re.search(r'[-+]?\d*\.\d+|\d+', str)
str = num.group() if num else 'NaN'
if firstonly:
return str
out.append(str)
n += i+j
if firstonly:
return ''
return out
def parseStockTransactions(list, broker, file):
changeover = datetime(2018, 2, 2)
reverse = True if broker == 'ameritrade' else False
count = 100000 if reverse else 0
fp = open(file, 'r')
mlast, divs, buys = False, [], []
if broker == 'ameritrade':
# hack to get the cash balance in sync with schwab
line = '01/01/2023,442250105463,CLIENT REQUESTED ELECTRONIC FUNDING RECEIPT (FUNDS NOW),,,,,0.32,,,,'
t = Transaction(broker, line, 100001)
list[t.date] = t
for line in fp:
if not line.strip() or 'DATE' in line or 'Symbol' in line or \
'END OF FILE' in line or 'Transactions' in line or 'Date' in line:
continue
m = re.match('^(?P<s>.*)SHARE CLASS CONVERSION \((?P<n>[A-Z]*)\),(?P<q>[0-9\.]*)(?P<e>.*)$', line)
if m:
if not mlast:
mlast = m
continue
line = '%sSHARE CLASS CONVERSION (%s),%s%s' % \
(m.group('s'), mlast.group('n'), mlast.group('q'), m.group('e'))
mlast = False
if broker == 'schwab':
line = line.replace('"', '').replace('$', '').replace('\n', '')
t = Transaction(broker, line, count)
if broker == 'ameritrade':
if t.date < changeover or t.id in ameritradeids:
continue
ameritradeids.append(t.id)
if t.action == 'Dividend Reinvest':
divs.append(t)
elif t.action == 'Buy':
buys.append(t)
elif broker == 'schwab':
if t.action == 'Dividend Reinvest':
divs.append(t)
elif t.action == 'Buy':
buys.append(t)
elif broker == 'scottrade':
if t.date >= changeover:
continue
count += -1 if reverse else 1
list[t.date] = t
if broker != 'ameritrade' and broker != 'schwab':
return
for div in divs:
for buy in buys:
if div.symbol == buy.symbol and div.amount == -1 * buy.amount:
div.quantity = buy.quantity
div.price = buy.price
del list[buy.date]
def parseBonds(file):
fp = open(file, 'r')
value = 0.0
print('')
print(' SAVINGS BONDS')
print('-------------------------------------')
print(' SERIAL DENOM ISSUED VALUE')
print('-------------------------------------')
for line in fp:
line = line.replace('\n', '')
f = line.split('\t')
value += float(f[3])
print('%s %6s %8s %8s' % (f[0], f[1], f[2], f[3]))
print('-------------------------------------')
print('TOTAL %10s' % '$%.2f'%value)
portfolio.networth += value
def parseOther(file):
fp = open(file, 'r')
total = 0.0
print('')
print(' %s' % file)
print('-------------------------------------')
print(' SOURCE VALUE')
print('-------------------------------------')
for line in fp:
line = line.replace('\n', '')
f = line.split('\t')
value = float(f[1])
total += value
print('%20s %15s' % (f[0], '$%.2f'%value))
print('-------------------------------------')
print('TOTAL %10s' % '$%.2f'%total)
portfolio.networth += total
def doError(msg):
    print('ERROR: %s\n' % msg)
sys.exit()
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate stock performance')
parser.add_argument('-v', action='store_true',
help='verbose output')
parser.add_argument('-d', metavar='date',
help='date to calculate for')
args = parser.parse_args()
date = False
verbose = args.v
if args.d:
try:
date = datetime.strptime(args.d, '%m/%d/%Y')
except:
doError('Invalid date format: %s' % args.d)
home = os.environ['HOME']+'/.finance/'
parseBonds(home+'bonds.txt')
parseOther(home+'cash.txt')
parseOther(home+'retirement.txt')
list = dict()
parseStockTransactions(list, 'scottrade', home+'scottrade.csv')
for filename in os.listdir(home+'ameritrade'):
if not re.match('^.*.csv$', filename):
continue
file = home+'ameritrade/'+filename
parseStockTransactions(list, 'ameritrade', file)
for filename in os.listdir(home+'schwab'):
if not re.match('^.*.csv$', filename):
continue
file = home+'schwab/'+filename
parseStockTransactions(list, 'schwab', file)
for d in sorted(list.keys()):
t = list[d]
if date and t.date > date:
break
if verbose:
t.show()
portfolio.stockTransaction(t)
# portfolio.showPurchases()
portfolio.getStockPrices()
portfolio.show()
print("NET WORTH = $%.2f" % portfolio.networth)
print('')
|
atomicpunk/scripts
|
finance.py
|
finance.py
|
py
| 14,573 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "urllib3.PoolManager",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "yfinance.download",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 463,
"usage_type": "call"
}
] |
18575713061
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 23:45:19 2020
@author: Niki
"""
#
import pickle
import os
from lettersEnv_2 import LettersEnv
import numpy as np
from esn_modified import ESN,identity
from sklearn.model_selection import train_test_split
import time
import string
# return valid number of time steps (disregard the empty time-steps after the episode was terminated)
def valid_steps(episode_data):
# episode_data[] = 0
# runs_acc = np.ma.masked_equal(runs_acc, float('inf'))
return episode_data[episode_data[:,0] != float('inf'),:]
# ============================== Load states
# load states file
file = 'states\\states_cost10_newenv.pkl'
with open(file, 'rb') as inp:
std = pickle.load(inp)
states = std['states']
# quickly change the indices
states['state'] = states.index
lett = [i for i in string.ascii_uppercase]
states.index=[lett.index(i) for i in states.index.tolist()]
# ============================ Create environment
#create env object
env = LettersEnv(states=states)
#============================= Load training samples=
file = 'trainingSamples\\envsampleslincost10_newenv_2.pkl'
with open(file, 'rb') as inp:
samples = pickle.load(inp)
# !!! dont forget some time steps are infinity!!!
#==================================Separate train and test
train, test = train_test_split(samples, test_size=0.2)
tr_inputs = train[:,:,:2]
tr_outputs = train[:,:,2:]
te_inputs = test[:,:,:2]
te_outputs = test[:,:,2:]
# =============================== Params
#== Params which remain unchanged
n_inputs=2
n_outputs=2
out_activation=identity
inverse_out_activation=identity
input_bias=1
#== Grid search
# depending on results, do more fine-grained after
n_ress = [10,20,50]
spec_rads = [0.7,0.8,0.9]
spars = [0.7,0.9]
inp_scals = [0.5,0.8,1]
# !!!! to test
#inr=0
#nr=n_ress[0]
#isr=0
#sr=spec_rads[0]
#isp=0
#sp=spars[0]
#iinps=0
#inps=inp_scals[0]
filenm = 'runData\\esn\\train_esn_newenv.pkl'
##========================================================== Train ESN
comb = 0
ncombs = len(n_ress)*len(spec_rads)*len(spars)*len(inp_scals)
perfs = np.tile(np.repeat(float('inf'), int(5)), (ncombs,1))
for inr,nr in enumerate(n_ress):
for isr, sr in enumerate(spec_rads):
for isp, sp in enumerate(spars):
for iinps, inps in enumerate(inp_scals):
tic=time.perf_counter()
print('Running comb '+ str(comb) + ' out of ' + str(ncombs))
num_nets = 10
perfs_networks=np.repeat(float('inf'),num_nets)
for irun in range(num_nets):
print('Instantiating network ' + str(irun))
esn = ESN(n_inputs=n_inputs,
n_outputs=n_outputs,
n_reservoir=nr,
spectral_radius=sr,
sparsity=sp,
input_weights_scaling = inps,
out_activation=out_activation,
inverse_out_activation=inverse_out_activation,
input_bias=input_bias)
print('RLS training...')
for ep in range(tr_inputs.shape[0]):
epinputs = valid_steps(tr_inputs[ep,:,:])
epoutputs = valid_steps(tr_outputs[ep,:,:])
acts = esn.get_states(epinputs, extended=True, continuation=False)
epoutputs = esn.inverse_out_activation(epoutputs)
for actval,outval in zip(acts,epoutputs):
esn.RLSfilter.process_datum(actval.reshape(-1,1), outval.reshape(-1,1))
print('Testing...')
preds = np.zeros((te_inputs.shape[0],1))
for teep in range(te_inputs.shape[0]):
epinputs = valid_steps(te_inputs[teep,:,:])
epoutputs = valid_steps(te_outputs[teep,:,:])
predsep=np.zeros(epoutputs.shape)
acts = esn.get_states(epinputs, extended=True, continuation=False)
for iact, actval in enumerate(acts):
predsep[iact,:]=esn.out_activation(esn.RLSfilter.predict(actval.reshape(-1,1)).T)
preds[teep] = np.mean(np.sum(np.abs(predsep-epoutputs),axis=1))
totalmeanerr = np.round(np.mean(preds),2)
perfs_networks[irun] = totalmeanerr
# calc mean network error
perfs_networks[np.isnan(perfs_networks)] = 0
perfs_networks[perfs_networks > 100] = 0
ma_perfs = np.ma.masked_equal(perfs_networks, 0)
meannet = np.mean(ma_perfs)
perfs[comb,:]=[nr,sr,sp,inps,meannet]
## perfs[comb,:]=[nr,sr,sp,inps,totalmeanerr]
if comb%5 == 0:
with open(filenm, 'wb') as output:
pickle.dump(perfs, output, pickle.HIGHEST_PROTOCOL)
print('Saved perfs up until here...')
comb+=1
toc=time.perf_counter()
print('Finished comb '+ str(comb) + ' in ' + str(int((toc - tic)/60)) + ' minutes.')
#save
with open(filenm, 'wb') as output:
pickle.dump(perfs, output, pickle.HIGHEST_PROTOCOL)
|
nicoleta-kyo/diss
|
train_letters_task - Copy/train_esnsole_newenv.py
|
train_esnsole_newenv.py
|
py
| 5,789 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pickle.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "lettersEnv_2.LettersEnv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "esn_modified.identity",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "esn_modified.identity",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "numpy.tile",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "esn_modified.ESN",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_equal",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 158,
"usage_type": "attribute"
}
] |
18834168261
|
# -*- coding: utf-8 -*-
# Author:sen
# Date:2020/3/9 20:04
from typing import List
class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n = len(nums)
a = sorted(nums)
j = len(a) - 1
        # offset around the middle element so equal values don't end up adjacent
        for i in range(1, n, 2):  # fill the odd indices with the largest remaining values
nums[i] = a[j]
j -= 1
        for i in range(0, n, 2):  # then fill the even indices with the next largest values
nums[i] = a[j]
j -= 1
return nums
if __name__ == '__main__':
nums = [1, 5, 2, 3, 6, 4]
so = Solution()
print(so.wiggleSort(nums))
|
PandoraLS/CodingInterview
|
ProgrammingOJ/LeetCode_python/324_摆动排序2.py
|
324_摆动排序2.py
|
py
| 710 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
}
] |
74073674428
|
"""Profile Model related tests."""
# Django
from django.test import TestCase
# Models
from platzigram_api.users.models import (
User
)
class ProfileModelTestCase(TestCase):
"""Profile Model Test case is a class that manages every test related to Profile model."""
def setUp(self) -> None:
"""Sets the general bars to be used on tests."""
self.user = User.objects.create_user(
username='cheke',
password='idkskere',
email='[email protected]',
first_name='Francisco Ezequiel',
last_name='Banos Ramirez',
phone_number='+52 9581006329'
)
self.profile = self.user.profile
self.user2 = User.objects.create_user(
username='hermabody',
password='idkskere',
email='[email protected]',
first_name='Eli',
last_name='Estrada'
)
self.profile2 = self.user2.profile
def test_following_other_user(self) -> None:
"""Test the functionality to follow another user."""
self.profile.follow(
self.profile2
)
self.assertIn(
self.profile2,
self.profile.following.all(
)
)
self.assertIn(
self.profile,
self.profile2.followers.all(
)
)
def test_unfollowing_other_user(self) -> None:
"""Test the functionality to unfollow another user."""
self.profile.follow(
self.profile2
)
# Following
self.assertIn(
self.profile2,
self.profile.following.all(
)
)
self.assertIn(
self.profile,
self.profile2.followers.all(
)
)
self.profile.unfollow(
self.profile2
)
# Unfollowing
self.assertNotIn(
self.profile2,
self.profile.following.all(
)
)
self.assertNotIn(
self.profile,
self.profile2.followers.all(
)
)
def test_following_an_user_does_not_cause_that_the_user_follow_you(self) -> None:
"""Test that proves that when you start to follow a user that user doesn't start to follow you."""
self.profile.follow(
self.profile2
)
self.assertIn(
self.profile2,
self.profile.following.all(
)
)
self.assertIn(
self.profile,
self.profile2.followers.all(
)
)
self.assertNotIn(
self.profile,
self.profile2.following.all(
)
)
self.assertNotIn(
self.profile2,
self.profile.followers.all(
)
)
|
ChekeGT/Platzigram-Api
|
platzigram_api/users/tests/test_models/test_profiles.py
|
test_profiles.py
|
py
| 2,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "platzigram_api.users.models.User.objects.create_user",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "platzigram_api.users.models.User.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "platzigram_api.users.models.User",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "platzigram_api.users.models.User.objects.create_user",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "platzigram_api.users.models.User.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "platzigram_api.users.models.User",
"line_number": 28,
"usage_type": "name"
}
] |
26374935080
|
import os
import sys
import time
import logging
import collections
import csv
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
__all__ = ['load_partition_data_landmarks_g23k', 'load_partition_data_landmarks_g160k']
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Landmarks(data.Dataset):
def __init__(self, data_dir, allfiles, dataidxs=None, train=True, transform=None, target_transform=None,
download=False):
"""
allfiles is [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ...
{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... ]
"""
self.allfiles = allfiles
if dataidxs == None:
self.local_files = self.allfiles
else:
self.local_files = self.allfiles[dataidxs[0]: dataidxs[1]]
# print("self.local_files: %d, dataidxs: (%d, %d)" % (len(self.local_files), dataidxs[0], dataidxs[1]))
self.data_dir = data_dir
self.dataidxs = dataidxs
self.transform = transform
self.target_transform = target_transform
def __len__(self):
# if self.user_id != None:
# return sum([len(local_data) for local_data in self.mapping_per_user.values()])
# else:
# return len(self.mapping_per_user)
return len(self.local_files)
def __getitem__(self, idx):
# if self.user_id != None:
# img_name = self.mapping_per_user[self.user_id][idx]['image_id']
# label = self.mapping_per_user[self.user_id][idx]['class']
# else:
# img_name = self.mapping_per_user[idx]['image_id']
# label = self.mapping_per_user[idx]['class']
img_name = self.local_files[idx]['image_id']
label = int(self.local_files[idx]['class'])
img_name = os.path.join(self.data_dir, str(img_name) + ".jpg")
# convert jpg to PIL (jpg -> Tensor -> PIL)
image = Image.open(img_name)
# jpg_to_tensor = transforms.ToTensor()
# tensor_to_pil = transforms.ToPILImage()
# image = tensor_to_pil(jpg_to_tensor(image))
# image = jpg_to_tensor(image)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _read_csv(path: str):
with open(path, 'r') as f:
return list(csv.DictReader(f))
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
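        # pick a random centre and zero out a length x length square of the mask (Cutout augmentation)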
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_landmarks():
# IMAGENET_MEAN = [0.5071, 0.4865, 0.4409]
# IMAGENET_STD = [0.2673, 0.2564, 0.2762]
IMAGENET_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STD = [0.5, 0.5, 0.5]
image_size = 224
train_transform = transforms.Compose([
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
return train_transform, valid_transform
def get_mapping_per_user(fn):
"""
mapping_per_user is {'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
} or
[{'user_id': xxx, 'image_id': xxx, 'class': xxx} ...
{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... ]
}
"""
mapping_table = _read_csv(fn)
expected_cols = ['user_id', 'image_id', 'class']
if not all(col in mapping_table[0].keys() for col in expected_cols):
        logger.error('%s has wrong format.', fn)
raise ValueError(
'The mapping file must contain user_id, image_id and class columns. '
'The existing columns are %s' % ','.join(mapping_table[0].keys()))
data_local_num_dict = dict()
mapping_per_user = collections.defaultdict(list)
data_files = []
net_dataidx_map = {}
sum_temp = 0
for row in mapping_table:
user_id = row['user_id']
mapping_per_user[user_id].append(row)
for user_id, data in mapping_per_user.items():
num_local = len(mapping_per_user[user_id])
# net_dataidx_map[user_id]= (sum_temp, sum_temp+num_local)
# data_local_num_dict[user_id] = num_local
net_dataidx_map[int(user_id)]= (sum_temp, sum_temp+num_local)
data_local_num_dict[int(user_id)] = num_local
sum_temp += num_local
data_files += mapping_per_user[user_id]
assert sum_temp == len(data_files)
return data_files, data_local_num_dict, net_dataidx_map
# for centralized training
def get_dataloader(dataset, datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
return get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs)
def get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
dl_obj = Landmarks
transform_train, transform_test = _data_transforms_landmarks()
train_ds = dl_obj(datadir, train_files, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, test_files, dataidxs=None, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)
return train_dl, test_dl
def load_partition_data_landmarks_g23k(data_dir, batch_size=10):
fed_g23k_train_map_file = './data/landmark/data_user_dict/gld23k_user_dict_train.csv'
fed_g23k_test_map_file = './data/landmark/data_user_dict/gld23k_user_dict_test.csv'
if (not os.path.isfile(os.path.join(data_dir, fed_g23k_train_map_file))) or (not os.path.isfile(os.path.join(data_dir, fed_g23k_test_map_file))):
os.system('bash ./data_utils/download_scripts/download_landmark.sh')
client_number = 233
fed_train_map_file = fed_g23k_train_map_file
fed_test_map_file = fed_g23k_test_map_file
train_files, data_local_num_dict, net_dataidx_map = get_mapping_per_user(fed_train_map_file)
test_files = _read_csv(fed_test_map_file)
class_num = len(np.unique([item['class'] for item in train_files]))
# logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = len(train_files)
    train_data_global, test_data_global = get_dataloader(None, data_dir, train_files, test_files, batch_size, batch_size)
# logging.info("train_dl_global number = " + str(len(train_data_global)))
# logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_files)
# get local dataset
data_local_num_dict = data_local_num_dict
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
# local_data_num = len(dataidxs)
local_data_num = dataidxs[1] - dataidxs[0]
# data_local_num_dict[client_idx] = local_data_num
# logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
        train_data_local, test_data_local = get_dataloader(None, data_dir, train_files, test_files, batch_size, batch_size,
dataidxs)
# logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
# client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
client_loader = {'train': train_data_local_dict, 'test': test_data_global}
dataset_sizes = {'train': data_local_num_dict, 'test': test_data_num}
return client_loader, dataset_sizes, client_number
def load_partition_data_landmarks_g160k(data_dir, batch_size=10):
fed_g160k_train_map_file = './data/landmark/data_user_dict/gld160k_user_dict_train.csv'
fed_g160k_map_file = './data/landmark/data_user_dict/gld160k_user_dict_test.csv'
    if (not os.path.isfile(os.path.join(data_dir, fed_g160k_train_map_file))) or (not os.path.isfile(os.path.join(data_dir, fed_g160k_map_file))):
os.system('bash ./data_utils/download_scripts/download_landmark.sh')
client_number = 1262
fed_train_map_file = fed_g160k_train_map_file
fed_test_map_file = fed_g160k_map_file
train_files, data_local_num_dict, net_dataidx_map = get_mapping_per_user(fed_train_map_file)
test_files = _read_csv(fed_test_map_file)
class_num = len(np.unique([item['class'] for item in train_files]))
# logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = len(train_files)
    train_data_global, test_data_global = get_dataloader(None, data_dir, train_files, test_files, batch_size, batch_size)
# logging.info("train_dl_global number = " + str(len(train_data_global)))
# logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_files)
# get local dataset
data_local_num_dict = data_local_num_dict
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
# local_data_num = len(dataidxs)
local_data_num = dataidxs[1] - dataidxs[0]
# data_local_num_dict[client_idx] = local_data_num
# logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
        train_data_local, test_data_local = get_dataloader(None, data_dir, train_files, test_files, batch_size, batch_size,
dataidxs)
# logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
# client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
client_loader = {'train': train_data_local_dict, 'test': test_data_global}
dataset_sizes = {'train': data_local_num_dict, 'test': test_data_num}
return client_loader, dataset_sizes, class_num, client_number
|
Jaewoo-Shin/FL_ACT
|
data_utils/landmark.py
|
landmark.py
|
py
| 11,398 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.isfile",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 255,
"usage_type": "call"
}
] |
70210976508
|
import joblib
import sklearn
from src.features.missing_indicator import MissingIndicator
from src.features.cabin_only_letter import CabinOnlyLetter
from src.features.categorical_imputer_encoder import CategoricalImputerEncoder
from src.features.median_imputation import NumericalImputesEncoder
from src.features.rare_label_categorial import RareLabelCategoricalEncoder
from src.features.one_hot_encoder import OneHotEncoder
from src.features.min_max_scaler import MinMaxScaler
import pandas as pd
import re
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.datasets import make_classification
SEED_MODEL = 42
NUMERICAL_VARS = ['pclass', 'age', 'sibsp', 'parch', 'fare']
CATEGORICAL_VARS = ['sex', 'cabin', 'embarked', 'title']
TARGET = 'survived'
titanic_pipeline = Pipeline(
[
('missing_indicator', MissingIndicator(NUMERICAL_VARS)),
('cabin_only_letter', CabinOnlyLetter('cabin')),
('categorical_imputer', CategoricalImputerEncoder(CATEGORICAL_VARS)),
('median_imputation', NumericalImputesEncoder(NUMERICAL_VARS)),
('rare_labels', RareLabelCategoricalEncoder(tol=0.02, variables=CATEGORICAL_VARS)),
('dummy_vars', OneHotEncoder(CATEGORICAL_VARS)),
('scaling', MinMaxScaler()),
('log_reg', LogisticRegression(C=0.0005, class_weight='balanced', random_state=SEED_MODEL))
]
)
#data = 'src/models/cleaned_data'
# Loading data from specific url
#df = pd.read_csv(data)
URL = 'https://www.openml.org/data/get_csv/16826755/phpMYEkMl'
# Loading data from specific url
df = pd.read_csv(URL)
# Uncovering missing data
df.replace('?', np.nan, inplace=True)
df['age'] = df['age'].astype('float')
df['fare'] = df['fare'].astype('float')
# helper function 1
def get_first_cabin(row):
try:
return row.split()[0]
except:
return np.nan
# Keep only one cabin
df['cabin'] = df['cabin'].apply(get_first_cabin)
# helper function 2
def get_title(passenger):
line = passenger
if re.search('Mrs', line):
return 'Mrs'
elif re.search('Mr', line):
return 'Mr'
elif re.search('Miss', line):
return 'Miss'
elif re.search('Master', line):
return 'Master'
else:
return 'Other'
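# Illustrative behaviour (hypothetical passenger names):
#   get_title('Allen, Miss. Elisabeth') -> 'Miss'
#   get_title('Futrelle, Mrs. Jacques') -> 'Mrs'
# 'Mrs' must be checked before 'Mr', since every string containing 'Mrs' also contains 'Mr'.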
# Extract the title from 'name'
df['title'] = df['name'].apply(get_title)
# Dropping irrelevant columns
DROP_COLS = ['boat','body','home.dest','ticket','name']
df.drop(DROP_COLS, axis=1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split( df.drop(TARGET, axis=1), df[TARGET], test_size=0.2, random_state=SEED_MODEL)
titanic_pipeline.fit(X_train, y_train)
preds = titanic_pipeline.predict(X_test)
print(f'Accuracy of the model is {(preds == y_test).sum() / len(y_test)}')
filename = 'titanic_pipeline_model.sav'
joblib.dump(titanic_pipeline, filename)
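# To reuse the persisted pipeline later (sketch, same filename as saved above):
#   loaded_pipeline = joblib.load('titanic_pipeline_model.sav')
#   loaded_pipeline.predict(X_test)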
# Run this file with: python -m src.models.train_model
|
colivarese/Titanic_Pipeline_MLOps_Eq3
|
src/models/train_model.py
|
train_model.py
|
py
| 3,077 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "src.features.missing_indicator.MissingIndicator",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "src.features.cabin_only_letter.CabinOnlyLetter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "src.features.categorical_imputer_encoder.CategoricalImputerEncoder",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "src.features.median_imputation.NumericalImputesEncoder",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "src.features.rare_label_categorial.RareLabelCategoricalEncoder",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "src.features.one_hot_encoder.OneHotEncoder",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "src.features.min_max_scaler.MinMaxScaler",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "joblib.dump",
"line_number": 99,
"usage_type": "call"
}
] |
21218371819
|
import os
import requests
import sys
import subprocess
def resolve_api_url():
url = os.getenv('GITHUB_REPO_URL') or os.getenv('GITHUB_ORG_URL')
if not url:
print('Need GITHUB_REPO_URL or GITHUB_ORG_URL!')
sys.exit(1)
scope = 'repos' if os.getenv('GITHUB_REPO_URL') else 'orgs'
# str.lstrip strips a set of characters rather than a prefix, which can eat leading
# letters of the owner/org name, so remove the URL prefix explicitly.
name = url[len('https://github.com/'):]
return f'https://api.github.com/{scope}/{name}'
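# Example (hypothetical repository): GITHUB_REPO_URL=https://github.com/acme/widget
# resolves to https://api.github.com/repos/acme/widget; an org URL resolves under /orgs/.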
def get_runner_token():
token = os.getenv('GITHUB_RUNNER_TOKEN')
if token:
return token
api_token = os.getenv('GITHUB_API_TOKEN')
if not api_token:
print('Need GITHUB_API_TOKEN or GITHUB_RUNNER_TOKEN!')
sys.exit(1)
resp = requests.post(
f'{API_URL}/actions/runners/registration-token',
headers={
'Accept': 'application/vnd.github.v3+json',
'Authorization': f'token {api_token}',
}
)
if resp.ok:
json = resp.json()
return json['token']
else:
print("Cannot get token from API!")
sys.exit(1)
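# Roughly equivalent manual request (sketch, placeholders in angle brackets):
#   curl -X POST \
#        -H "Accept: application/vnd.github.v3+json" \
#        -H "Authorization: token <GITHUB_API_TOKEN>" \
#        https://api.github.com/repos/<owner>/<repo>/actions/runners/registration-token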
def configure_runner():
token = get_runner_token()
label = os.getenv('GITHUB_RUNNER_LABEL')
name = os.getenv('GITHUB_RUNNER_NAME')
replace = os.getenv('GITHUB_RUNNER_REPLACE')
url = os.getenv('GITHUB_REPO_URL') or os.getenv('GITHUB_ORG_URL')
work_dir = "_work"
exe = ['./config.sh',
'--unattended',
'--token', token,
'--name', name,
'--url', url,
'--work', work_dir]
if label:
exe.extend(['--labels', label])
if replace:
exe.append('--replace')
proc = subprocess.run(exe)
if proc.returncode != 0:
print(f'{" ".join(exe)} failed!')
sys.exit(1)
API_URL = resolve_api_url()
if not os.path.isfile('.runner'):
configure_runner()
if len(sys.argv) > 1:
os.execv(sys.argv[1], sys.argv[1:])
|
phoenix-rtos/phoenix-rtos-docker
|
gh-runner/entry.py
|
entry.py
|
py
| 1,868 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.execv",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 79,
"usage_type": "attribute"
}
] |
17812917902
|
import inspect
from typing import Type, List, Optional, TypeVar, Dict, Callable
from lyrid.api.actor.switch.handle_rule import HandlePolicy, HandleRule
from lyrid.api.actor.switch.property_injection import POLICY_PROPERTY, AFTER_RECEIVE_PROPERTY
from lyrid.base.actor import Actor
from lyrid.core.messaging import Message, Address
class OnReceiveDescriptor:
def __init__(self, rules: List[HandleRule], after_receive: Optional[Callable]):
self._rules = rules
self._after_receive = after_receive
self._method_map: Dict[int, OnReceiveMethod] = {}
def __get__(self, instance, owner):
id_ = id(instance)
if id_ not in self._method_map:
self._method_map[id_] = OnReceiveMethod(instance, self._rules, self._after_receive)
method = self._method_map.get(id_, None)
return method
class OnReceiveMethod:
def __init__(self, actor: Actor, rules: List[HandleRule], after_receive: Optional[Callable]):
self._actor = actor
self._rules = rules
self._after_receive = after_receive
def __call__(self, sender: Address, message: Message):
matched_rule = next((rule for rule in self._rules if rule.match(sender, message)), None)
if matched_rule is None:
return
matched_rule.execute(self._actor, sender, message)
if self._after_receive:
self._after_receive(self._actor)
A = TypeVar("A", bound=Actor)
def use_switch(actor: Type[A]) -> Type[A]:
rules: List[HandleRule] = []
after_receive: Optional[Callable] = None
for cls in inspect.getmro(actor):
if not issubclass(cls, Actor) and cls is not Actor:
continue
for method in cls.__dict__.values():
policy: Optional[HandlePolicy] = getattr(method, POLICY_PROPERTY, None)
if policy is not None:
rules.append(policy.create_handle_rule_with_function(method))
if after_receive is None and getattr(method, AFTER_RECEIVE_PROPERTY, False):
after_receive = method
setattr(actor, "on_receive", OnReceiveDescriptor(rules, after_receive))
return actor
|
SSripilaipong/lyrid
|
lyrid/api/actor/switch/use_switch.py
|
use_switch.py
|
py
| 2,159 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "lyrid.api.actor.switch.handle_rule.HandleRule",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "lyrid.base.actor.Actor",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "lyrid.api.actor.switch.handle_rule.HandleRule",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "lyrid.core.messaging.Address",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "lyrid.core.messaging.Message",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "lyrid.base.actor.Actor",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "lyrid.api.actor.switch.handle_rule.HandleRule",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "inspect.getmro",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "lyrid.base.actor.Actor",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "typing.Optional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "lyrid.api.actor.switch.handle_rule.HandlePolicy",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "lyrid.api.actor.switch.property_injection.POLICY_PROPERTY",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "lyrid.api.actor.switch.property_injection.AFTER_RECEIVE_PROPERTY",
"line_number": 55,
"usage_type": "argument"
}
] |
73944800188
|
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
from pathlib import Path
import hydra
import numpy as np
import torch
from dm_env import specs
import dmc
import utils
from logger import Logger
from numpy_replay_buffer import EfficientReplayBuffer
from video import TrainVideoRecorder, VideoRecorder
from utils import load_offline_dataset_into_buffer
torch.backends.cudnn.benchmark = True
def make_agent(obs_spec, action_spec, cfg):
cfg.obs_shape = obs_spec.shape
cfg.action_shape = action_spec.shape
return hydra.utils.instantiate(cfg)
class Workspace:
def __init__(self, cfg):
self.work_dir = Path.cwd()
print(f'workspace: {self.work_dir}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
self.device = torch.device(cfg.device)
self.setup()
self.agent = make_agent(self.train_env.observation_spec(),
self.train_env.action_spec(),
self.cfg.agent)
self.timer = utils.Timer()
self._global_step = 0
self._global_episode = 0
def setup(self):
# create logger
self.logger = Logger(self.work_dir, use_tb=self.cfg.use_tb, offline=self.cfg.offline,
distracting_eval=self.cfg.eval_on_distracting, multitask_eval=self.cfg.eval_on_multitask)
# create envs
self.train_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, self.cfg.distracting_mode)
self.eval_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, self.cfg.distracting_mode)
# create replay buffer
data_specs = (self.train_env.observation_spec(),
self.train_env.action_spec(),
specs.Array((1,), np.float32, 'reward'),
specs.Array((1,), np.float32, 'discount'))
self.replay_buffer = EfficientReplayBuffer(self.cfg.replay_buffer_size,
self.cfg.batch_size,
self.cfg.nstep,
self.cfg.discount,
self.cfg.frame_stack,
data_specs)
self.video_recorder = VideoRecorder(
self.work_dir if self.cfg.save_video else None)
self.train_video_recorder = TrainVideoRecorder(
self.work_dir if self.cfg.save_train_video else None)
self.eval_on_distracting = self.cfg.eval_on_distracting
self.eval_on_multitask = self.cfg.eval_on_multitask
@property
def global_step(self):
return self._global_step
@property
def global_episode(self):
return self._global_episode
@property
def global_frame(self):
return self.global_step * self.cfg.action_repeat
def eval(self):
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
time_step = self.eval_env.reset()
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
total_reward += time_step.reward
step += 1
episode += 1
self.video_recorder.save(f'{self.global_frame}.mp4')
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', total_reward / episode)
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('step', self.global_step)
def eval_distracting(self, record_video):
distraction_modes = ['easy', 'medium', 'hard', 'fixed_easy', 'fixed_medium', 'fixed_hard']
if not hasattr(self, 'distracting_envs'):
self.distracting_envs = []
for distraction_mode in distraction_modes:
env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, distracting_mode=distraction_mode)
self.distracting_envs.append(env)
for env, env_name in zip(self.distracting_envs, distraction_modes):
self.eval_single_env(env, env_name, record_video)
def eval_multitask(self, record_video):
multitask_modes = [f'len_{i}' for i in range(1, 11, 1)]
if not hasattr(self, 'multitask_envs'):
self.multitask_envs = []
for multitask_mode in multitask_modes:
env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, multitask_mode=multitask_mode)
self.multitask_envs.append(env)
for env, env_name in zip(self.multitask_envs, multitask_modes):
self.eval_single_env(env, env_name, record_video)
def eval_single_env(self, env, env_name, save_video):
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
time_step = env.reset()
self.video_recorder.init(env, enabled=((episode == 0) and save_video))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step = env.step(action)
self.video_recorder.record(env)
total_reward += time_step.reward
step += 1
episode += 1
self.video_recorder.save(f'{env_name}_{self.global_frame}.mp4')
self.logger.log(f'eval/{env_name}_episode_reward', total_reward / episode, self.global_frame)
def train(self):
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames,
self.cfg.action_repeat)
seed_until_step = utils.Until(self.cfg.num_seed_frames,
self.cfg.action_repeat)
eval_every_step = utils.Every(self.cfg.eval_every_frames,
self.cfg.action_repeat)
# only in distracting evaluation mode
eval_save_vid_every_step = utils.Every(self.cfg.eval_save_vid_every_step,
self.cfg.action_repeat)
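# (Assumption based on the accompanying utils module: Until/Every convert these
# frame budgets to step counts by dividing by action_repeat before comparing
# against the step counter.)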
episode_step, episode_reward = 0, 0
time_step = self.train_env.reset()
self.replay_buffer.add(time_step)
self.train_video_recorder.init(time_step.observation)
metrics = None
while train_until_step(self.global_step):
if time_step.last():
self._global_episode += 1
self.train_video_recorder.save(f'{self.global_frame}.mp4')
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_buffer))
log('step', self.global_step)
# reset env
time_step = self.train_env.reset()
self.replay_buffer.add(time_step)
self.train_video_recorder.init(time_step.observation)
# try to save snapshot
if self.cfg.save_snapshot:
self.save_snapshot()
episode_step = 0
episode_reward = 0
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
if self.eval_on_distracting:
self.eval_distracting(eval_save_vid_every_step(self.global_step))
if self.eval_on_multitask:
self.eval_multitask(eval_save_vid_every_step(self.global_step))
self.eval()
# sample action
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=False)
# try to update the agent
if not seed_until_step(self.global_step):
metrics = self.agent.update(self.replay_buffer, self.global_step)
self.logger.log_metrics(metrics, self.global_frame, ty='train')
# take env step
time_step = self.train_env.step(action)
episode_reward += time_step.reward
self.replay_buffer.add(time_step)
self.train_video_recorder.record(time_step.observation)
episode_step += 1
self._global_step += 1
def train_offline(self, offline_dir):
# Open dataset, load as memory buffer
load_offline_dataset_into_buffer(Path(offline_dir), self.replay_buffer, self.cfg.frame_stack,
self.cfg.replay_buffer_size)
if self.replay_buffer.index == -1:
raise ValueError('No offline data loaded, check directory.')
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames, 1)
eval_every_step = utils.Every(self.cfg.eval_every_frames, 1)
show_train_stats_every_step = utils.Every(self.cfg.show_train_stats_every_frames, 1)
# only in distracting evaluation mode
eval_save_vid_every_step = utils.Every(self.cfg.eval_save_vid_every_step,
self.cfg.action_repeat)
metrics = None
step = 0
while train_until_step(self.global_step):
if show_train_stats_every_step(self.global_step):
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', step / elapsed_time)
log('total_time', total_time)
log('buffer_size', len(self.replay_buffer))
log('step', self.global_step)
step = 0
# try to save snapshot
if self.cfg.save_snapshot:
self.save_snapshot()
step += 1
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
if self.eval_on_distracting:
self.eval_distracting(eval_save_vid_every_step(self.global_step))
if self.eval_on_multitask:
self.eval_multitask(eval_save_vid_every_step(self.global_step))
self.eval()
# try to update the agent
metrics = self.agent.update(self.replay_buffer, self.global_step)
if show_train_stats_every_step(self.global_step):
self.logger.log_metrics(metrics, self.global_frame, ty='train')
self._global_step += 1
def save_snapshot(self):
snapshot = self.work_dir / 'snapshot.pt'
keys_to_save = ['agent', 'timer', '_global_step', '_global_episode']
payload = {k: self.__dict__[k] for k in keys_to_save}
with snapshot.open('wb') as f:
torch.save(payload, f)
def load_snapshot(self):
snapshot = self.work_dir / 'snapshot.pt'
with snapshot.open('rb') as f:
payload = torch.load(f)
for k, v in payload.items():
self.__dict__[k] = v
@hydra.main(config_path='cfgs', config_name='config')
def main(cfg):
from train import Workspace as W
root_dir = Path.cwd()
workspace = W(cfg)
print(cfg)
snapshot = root_dir / 'snapshot.pt'
if snapshot.exists():
print(f'resuming: {snapshot}')
workspace.load_snapshot()
if cfg.offline:
workspace.train_offline(cfg.offline_dir)
else:
workspace.train()
if __name__ == '__main__':
main()
|
conglu1997/v-d4rl
|
drqbc/train.py
|
train.py
|
py
| 13,697 |
python
|
en
|
code
| 64 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "hydra.utils.instantiate",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "hydra.utils",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "utils.set_seed_everywhere",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "utils.Timer",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logger.Logger",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "dmc.make",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "dmc.make",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "dm_env.specs.Array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "dm_env.specs",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "dm_env.specs.Array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dm_env.specs",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy_replay_buffer.EfficientReplayBuffer",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "video.VideoRecorder",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "video.TrainVideoRecorder",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "utils.Until",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "utils.eval_mode",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "dmc.make",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "dmc.make",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "utils.Until",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "utils.eval_mode",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "utils.Until",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "utils.Until",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "utils.Every",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "utils.Every",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "utils.eval_mode",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "utils.load_offline_dataset_into_buffer",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "utils.Until",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "utils.Every",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "utils.Every",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "utils.Every",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "train.Workspace",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "hydra.main",
"line_number": 303,
"usage_type": "call"
}
] |
9063639099
|
import datetime
import logging
import time
import os
import torch
from atss_core.config import cfg
import torch.distributed as dist
from atss_core.utils.comm import get_world_size, is_pytorch_1_1_0_or_later
from atss_core.utils.metric_logger import MetricLogger
stamps = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
def reduce_loss_dict(loss_dict):
"""
Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k in sorted(loss_dict.keys()):
loss_names.append(k)
all_losses.append(loss_dict[k])
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
all_losses /= world_size
reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
return reduced_losses
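# Typical use (as done in the training loop below): call once per iteration so that
# only rank 0 logs world-size-averaged losses, e.g.
#   loss_dict_reduced = reduce_loss_dict(loss_dict)
#   losses_reduced = sum(loss for loss in loss_dict_reduced.values())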
def do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
):
## logging ##
logger = logging.getLogger("atss_core.trainer")
logger.info("\n^_^-->The Program is Starting training:\n")
meters = MetricLogger(delimiter=" ")
infox = MetricLogger(delimiter=" ")
## SETTING FOR "maximum number of iterations" ##
max_iter = len(data_loader)
print("max_iter:----->", max_iter)
print("max_iter:----->", max_iter)
# max_iter = 20000
# max_iter = cfg.SOLVER.EPOCH * len(data_loader)
# max_iter = cfg.SOLVER.MAX_ITER
"""
checkpointers = torch.load(cfg.MODEL.LOAD_PTH)
model = torch.load(cfg.MODEL.LOAD_PTH)
model = torch.load(cfg.MODEL.LOAD_PTH)
print("checkpointers.keys():\n", checkpointers.keys())
print("type of model:\n", type(checkpointers["model"]))
print("checkpointers[model].keys():\n", checkpointers["model"].keys())
print("type of iteration:\n", type(checkpointers["iteration"]))
print("checkpointers[iteration].keys():\n", checkpointers["iteration"].keys())
model.load(checkpointers["model"])
model.load_state_dict(torch.load(cfg.MODEL.LOAD_PTH))
"""
## To load trained model .pth ##
#PATH = "/home/alanc/Documents/ATSS/training_dir/atss_R_50_FPN_1x-3/model_final_2020-11-11_20-22-08.pth"
##################################################################################
#print("The model is :\n",model)
#print("The state dict of model:\n",model.state_dict)
# print("The state dict of model:\n")
# for parameters in model.parameters():
# print(parameters)
####################################################################################
#PATH = "/home/alanc/Documents/ATSS/trained_pth/ATSS_R_101_FPN_2x.pth"
#PATH = "/home/alanc/Documents/ATSS/trained_pth2/ATSS_dcnv2_R_101_FPN_2x.pth"
#PATH = "/home/alanc/Documents/ATSS/trained_pth2/ATSS_X_101_32x8d_FPN_2x.pth"
#PATH = "/home/alanc/Documents/ATSS/trained_pth2/ATSS_dcnv2_X_101_32x8d_FPN_2x.pth"
PATH = "/home/alanc/Documents/ATSS/trained_pth/ATSS_dcnv2_X_101_64x4d_FPN_2x.pth"
# model.load_state_dict(torch.load(PATH)["model"], strict=False)
# model.load_state_dict(torch.load(cfg.MODEL.LOAD_PTH)["model"], strict=False)
# Checkpoint = torch.load(PATH)
# model_dict = model.state_dict()
# model_dict.update(Checkpoint)
# model.load_state_dict(model_dict, strict=False)
pretrained_dict = torch.load(PATH)
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
###################################################################################
#print("The new model is :\n", model)
#print("The state dict of new model:\n", model.state_dict)
# print("The state dict of new model:\n")
# for parameters in model.parameters():
# print(parameters)
####################################################################################
start_iter = arguments["iteration"]
model.train()
start_training_time = time.time()
end = time.time()
pytorch_1_1_0_or_later = is_pytorch_1_1_0_or_later()
## JUST FOR TRAINING ##
for j in range(1, cfg.SOLVER.EPOCH + 1, 1):
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
# in pytorch >= 1.1.0, scheduler.step() should be run after optimizer.step()
if not pytorch_1_1_0_or_later:
scheduler.step()
images = images.to(device)
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
if pytorch_1_1_0_or_later:
scheduler.step()
batch_time = time.time() - end
end = time.time()
infox.update(time=batch_time, data=data_time)
eta_seconds = infox.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
"""
Form of output
"""
if iteration % 50 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join(
[
"EPOCH: {EPOCH}",
"eta: {eta}",
"iter: {iter}",
"\n->{meters}",
"\n->Running info: {info}",
"\n->Learning Rate(lr): {lr:.6f}",
"\n->Max mem: {memory:.0f}",
]
).format(
EPOCH=str(j),
eta=eta_string,
iter=iteration,
meters=str(meters),
info=str(infox),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if iteration % checkpoint_period == 0:
checkpointer.save("model_{}_{:07d}_{}".format(j, iteration, stamps), **arguments)
if iteration == max_iter:
checkpointer.save("model_{}_{:07d}_{}".format(j, iteration, stamps), **arguments)
print("^_^-->The program has reached the maximum number of iterations(max_iter) and has been stopped")
break
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)
)
)
|
Alan-D-Chen/CDIoU-CDIoUloss
|
atss_core/engine/trainer.py
|
trainer.py
|
py
| 7,619 |
python
|
en
|
code
| 25 |
github-code
|
6
|
[
{
"api_name": "time.strftime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "atss_core.utils.comm.get_world_size",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.distributed.reduce",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.distributed.get_rank",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "atss_core.utils.metric_logger.MetricLogger",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "atss_core.utils.metric_logger.MetricLogger",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "atss_core.utils.comm.is_pytorch_1_1_0_or_later",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "atss_core.config.cfg.SOLVER",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "atss_core.config.cfg",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.cuda.max_memory_allocated",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 184,
"usage_type": "call"
}
] |
23084042766
|
from pyrfuniverse.envs import RFUniverseGymGoalWrapper
from pyrfuniverse.utils import RFUniverseToborController
import numpy as np
from gym import spaces
from gym.utils import seeding
import math
import pybullet as p
class ToborPushPullEnv(RFUniverseGymGoalWrapper):
metadata = {"render.modes": ["human"]}
def __init__(
self,
max_steps,
asset_bundle_file,
pull=True,
executable_file=None,
threshold_angle=None,
):
super().__init__(
executable_file=executable_file,
articulation_channel=True,
)
self.max_steps = max_steps
self.pull = pull
self.asset_bundle_file = asset_bundle_file
self.object_name_prefix = "Microwave_"
self.objects = [self.object_name_prefix + str(i + 1) for i in range(15)]
self.object_position_range_low = np.array([-0.3, 0.4, 1.3])
self.object_position_range_high = np.array([-0.1, 0.6, 1.5])
self.tobor_action_space_low = np.array([-0.2, 0, -0.6])
self.tobor_action_space_high = np.array([0.2, 0.4, -0.3])
self.eef_orn = np.array([0, 0, math.pi / 2])
self.episode_object_position = self.object_position_range_high.copy()
self.threshold_angle = threshold_angle
if self.threshold_angle is None:
if self.pull:
self.threshold_angle = 45.0
else:
self.threshold_angle = 15.0
self.ik_controller = RFUniverseToborController(
urdf_folder="/home/haoyuan/workspace/tobor",
left_hand="robotiq85",
right_hand="robotiq85",
left_init_joint_positions=[0] * 7,
right_init_joint_positions=[0] * 7,
)
self.seed()
self.t = 0
self.goal = self._sample_goal()
self._reset_object()
self.action_space = spaces.Box(low=-1, high=1, shape=(5,), dtype=np.float32)
obs = self._get_obs()
self.observation_space = spaces.Dict(
{
"observation": spaces.Box(
-np.inf, np.inf, shape=obs["observation"].shape, dtype=np.float32
),
"desired_goal": spaces.Box(
-np.inf, np.inf, shape=obs["desired_goal"].shape, dtype=np.float32
),
"achieved_goal": spaces.Box(
-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype=np.float32
),
}
)
def step(self, action: np.ndarray):
# Position control
action_ctrl = action.copy()
pos_ctrl = action_ctrl[:3] * 0.05
curr_pos = self._get_tobor_eef_position("left")
pos_ctrl = pos_ctrl + curr_pos
pos_ctrl = np.clip(
pos_ctrl,
self.episode_object_position + self.tobor_action_space_low,
self.episode_object_position + self.tobor_action_space_high,
)
# Rotation control
rot_ctrl_x = action_ctrl[3] * 5 / math.pi
curr_rot_x = float(self.eef_orn[0])
rot_ctrl_x = np.clip(rot_ctrl_x + curr_rot_x, -math.pi / 3, 0)
self.eef_orn = np.array([rot_ctrl_x, 0, math.pi / 2])
self._set_tobor_arm(
mode="left", position=pos_ctrl, eef_euler_angles=self.eef_orn
)
# Gripper width control
gripper_width = self._get_gripper_width("left")
gripper_width_ctrl = np.clip(gripper_width + action_ctrl[4] * 0.2, 0, 0.085)
gripper_angle = self._compute_gripper_angle(gripper_width_ctrl)
self._set_tobor_gripper(mode="left", gripper_angle=gripper_angle)
self.t += 1
obs = self._get_obs()
done = False
is_success = self._check_success(obs)
info = {"is_success": is_success}
reward = self.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)
if is_success > 0 or self.t == self.max_steps:
done = True
obs = self.reset()
return obs, reward, done, info
def reset(self):
super().reset()
self.env.reset()
self.ik_controller.reset()
self.t = 0
self.goal = self._sample_goal()
self._destroy_object()
self._reset_object()
# Set Tobor arm directly at the handle to reduce the exploration space
handle_position = self._get_handle_position()
if self.pull:
self.eef_orn = np.array([0, 0, math.pi / 2])
else:
self.eef_orn = np.array([-math.pi / 3, 0, math.pi / 2])
self._set_tobor_arm_directly("left", handle_position, list(self.eef_orn))
return self._get_obs()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self, mode="human"):
self._step()
def compute_reward(
self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: dict
):
d = self._compute_distance(achieved_goal / 10, desired_goal / 10)
return -d
def _get_obs(self):
tobor_eef_position = self._get_tobor_eef_position("left")
tobor_eef_orientation = self.eef_orn.copy()
tobor_eef_width = [self._get_gripper_width("left")]
tobor_obs = np.concatenate(
(tobor_eef_position, tobor_eef_orientation, tobor_eef_width)
)
handle_position = self._get_handle_position()
door_axis_position = self._get_door_axis_position()
open_angle = [self._get_door_open_angle()]
object_obs = np.concatenate((handle_position, door_axis_position, open_angle))
obs = np.concatenate((tobor_obs, object_obs))
return {
"observation": obs.copy(),
"achieved_goal": np.array(open_angle),
"desired_goal": self.goal.copy(),
}
def _sample_goal(self):
return np.array([self.threshold_angle])
def _destroy_object(self):
self.articulation_channel.set_action(
"Destroy",
index=4,
)
self._step()
def _reset_object(self):
# object_idx = self.np_random.randint(0, len(self.objects))
object_idx = 0
object_position = self.np_random.uniform(
self.object_position_range_low, self.object_position_range_high
)
self.episode_object_position = object_position.copy()
self.asset_channel.set_action(
"LoadArticulationBody",
filename=self.asset_bundle_file,
name=self.objects[object_idx],
position=list(object_position),
rotation=[0, 180, 0],
)
self._step()
# Set init joint position
if not self.pull:
self.articulation_channel.set_action(
"SetJointPositionDirectly", index=4, joint_positions=[60]
)
self._step()
def _get_handle_position(self):
handle_position = self.articulation_channel.data[4]["positions"][2]
return np.array(handle_position)
def _get_door_axis_position(self):
door_axis_position = self.articulation_channel.data[4]["positions"][1]
return np.array(door_axis_position)
def _get_door_open_angle(self):
return self.articulation_channel.data[4]["joint_positions"][0]
def _get_tobor_eef_position(self, mode):
assert mode in ["left", "right"], "Mode is either 'left' or 'right'"
if mode == "left":
left_eef_position = self.articulation_channel.data[1]["positions"][11]
return np.array(left_eef_position)
else:
right_eef_position = self.articulation_channel.data[3]["positions"][11]
return np.array(right_eef_position)
def _get_gripper_width(self, mode):
assert mode in ["left", "right"], "Mode is either 'left' or 'right'"
idx = 1
if mode == "right":
idx = 3
right_inner_finger_pos = np.array(
self.articulation_channel.data[idx]["positions"][5]
)
left_inner_finger_pos = np.array(
self.articulation_channel.data[idx]["positions"][10]
)
width = self._compute_distance(right_inner_finger_pos, left_inner_finger_pos)
# The position is at the center of the inner finger, so we must subtract the inner
# finger's width to get an accurate gripper width.
width = width - 0.00635
return width
def _calculate_tobor_arm_joint_positions(self, mode, position, eef_euler_angles):
assert mode in ["left", "right"], "Mode is either 'left' or 'right'"
arm_index = 0
if mode == "left":
arm_index = 0
elif mode == "right":
arm_index = 2
eef_orn = p.getQuaternionFromEuler(eef_euler_angles)
joint_positions = self.ik_controller.calculate_ik(mode, position, eef_orn)
return arm_index, joint_positions
def _set_tobor_arm(self, mode, position, eef_euler_angles):
arm_index, joint_positions = self._calculate_tobor_arm_joint_positions(
mode, position, eef_euler_angles
)
self.articulation_channel.set_action(
"SetJointPosition", index=arm_index, joint_positions=joint_positions
)
self._step()
def _set_tobor_arm_directly(self, mode, position, eef_euler_angles):
arm_index, joint_positions = self._calculate_tobor_arm_joint_positions(
mode, position, eef_euler_angles
)
self.articulation_channel.set_action(
"SetJointPositionDirectly", index=arm_index, joint_positions=joint_positions
)
self._step()
def _set_tobor_gripper(self, mode, gripper_angle):
assert mode in ["left", "right"], "Mode is either 'left' or 'right'"
gripper_index = 0
if mode == "left":
gripper_index = 1
elif mode == "right":
gripper_index = 3
self.articulation_channel.set_action(
"SetJointPosition",
index=gripper_index,
joint_positions=[gripper_angle, gripper_angle],
)
self._step()
def _compute_distance(self, point_a, point_b):
return np.linalg.norm(point_a - point_b, axis=-1)
def _compute_gripper_angle(self, width):
angle_rad = 0.715 - math.asin((width - 0.01) / 0.1143)
angle_deg = angle_rad * 180 / math.pi
return angle_deg
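# Approximate sanity check of the width-to-angle mapping: a fully open gripper
# (width 0.085 m) gives asin(0.075 / 0.1143) ~= 0.716 rad, i.e. an angle of ~0 deg,
# while a nearly closed gripper (width 0.01 m) gives 0.715 rad ~= 41 deg.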
def _check_success(self, obs):
achieved_goal = obs["achieved_goal"][0]
desired_goal = obs["desired_goal"][0]
if self.pull:
success = (desired_goal < achieved_goal).astype(np.float32)
else:
success = (desired_goal > achieved_goal).astype(np.float32)
return success
|
mvig-robotflow/pyrfuniverse
|
pyrfuniverse/envs/tobor_robotics/tobor_push_pull_env.py
|
tobor_push_pull_env.py
|
py
| 10,739 |
python
|
en
|
code
| 39 |
github-code
|
6
|
[
{
"api_name": "pyrfuniverse.envs.RFUniverseGymGoalWrapper",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pyrfuniverse.utils.RFUniverseToborController",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "gym.spaces.Box",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Dict",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "gym.spaces.Box",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Box",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Box",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "gym.utils.seeding.np_random",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "gym.utils.seeding",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pybullet.getQuaternionFromEuler",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "math.asin",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 301,
"usage_type": "attribute"
}
] |
4491655960
|
from imp import find_module
from typing import Dict, Set, List
from essence import *
import pickle
import os
import json
import re
from utils import *
import subprocess
# TODO:
# * We're still depending upon no kaslr, remove the need for it.
# * Make generation more efficient.
KPATCH_BINARY_PATH = "kpatch/kpatch-build/kpatch-build"
NUMBER_OF_CONCURRENT_MAKE_JOBS = 28
WORKLOAD_PATH: str = "/home/samad/lp_test/dummy-workload.sh"
VMLINUX_PATH: str = "vmlinux"
KPATCH_SRC_DIR_TREE = "linux-4.9.31/"
KPATCH_SRC_MOD_DIR_TREE = "linux-4.9.31-mod/"
SOURCE_PARSER_LINUX_TREE: str = "build/linux-4.9.31/"
SOURCE_TREE_FILE_ENCODING: str = "iso-8859-1"
# KERNEL_DEBUG_BINARY_LINUX_TREE: str = Binutils.find_build_root(VMLINUX_PATH) # The path in which the kernel binary was built.
DEP_SOLVER_LINUX_TREE: str = "linux-4.9.31/"
DEP_SOLVER_KERNEL_VERSION: str = "4.19.231"
DEP_SOLVER_GCC_VERSION_TEXT: str = "gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0" # GCC version of your compiler.
def setup_environment() -> None:
# Environment Variables used for kconfiglib. Edit them as you may deem fit.
os.environ["srctree"] = DEP_SOLVER_LINUX_TREE
os.environ["ARCH"] = "x86"
os.environ["SRCARCH"] = "x86"
os.environ["KERNELVERSION"] = DEP_SOLVER_KERNEL_VERSION
os.environ["CC"] = "gcc"
os.environ["HOSTCC"] = "gcc"
os.environ["HOSTCXX"] = "g++"
os.environ["CC_VERSION_TEXT"] = DEP_SOLVER_GCC_VERSION_TEXT
os.environ["LD"] = "ld"
def trace_kernel() -> List[str]:
# Tracing the kernel source here.
if not os.path.exists(".tracercache"):
t = tracer.FtraceTracer(VMLINUX_PATH)
s = t.trace(WORKLOAD_PATH)
print("Workload has Finished running.")
sources = list(t.addrs_to_kernel_source_lines(s))
with open(".tracercache", "wb") as fobj:
pickle.dump(sources, fobj)
else:
print("Using prebuilt tracer cache.")
with open(".tracercache", "rb") as fobj:
sources = pickle.load(fobj)
print(f"Length of source file array is: {len(sources)}")
return sources
def parse_source(sources):
p = parser.LinuxParser(SOURCE_PARSER_LINUX_TREE)
# Parsing the source for configs, etc.
print("Parsing kernel source......")
if not os.path.exists(".parsercache"):
print("Building cache...")
p.parse()
p.parse_c_files_for_configs()
with open(".parsercache", "wb") as fobj:
pickle.dump(p, fobj)
print('Done building parser cache.')
else:
print("Using prebuilt parser cache, Done.")
with open(".parsercache", "rb") as fobj:
p = pickle.load(fobj)
# Module tracing and config generation. The workload should write the content of /proc/modules.
# TODO: Make the framework do this not the workload. Maybe take random samples during runtime.
modules = None
with open("modules", "r") as fobj:
modules = fobj.read()
modules = [f"{i.split(' ')[0]}" for i in modules.split("\n") if i]
config_mods = set()
for filename in p.makefile_config_map.keys():
for module_name in modules:
if module_name in filename.split("/")[-1]:
config_mods.update(p.makefile_config_map[filename])
print(f"Configs genarted from mods: {len(config_mods)} \n")
# Genrating the configs from traced source files
configs_src = []
for traced_source_line in sources:
_fpath,_linum = traced_source_line.split(':')
_fpath = _fpath[len(KERNEL_DEBUG_BINARY_LINUX_TREE):]
_fpath = os.path.abspath(SOURCE_PARSER_LINUX_TREE.rstrip('/') + _fpath)
k = p.query(_fpath, int(_linum) if _linum.isdigit() else None)
configs_src.append(k)
configs_src = [i for i in configs_src if i]
print(f"Length of configs genrated from traced functions after filtering are: {len(configs_src)}.")
# Combining configs from LKMs and Source trace.
for i in configs_src:
for j in i:
config_mods.add(j)
print(
f"Total unique configs genarted after combining modules and traced functions: {len(config_mods)}"
)
return config_mods, p.config_to_c_file_map
def get_current_build_configs():
# Getting the current set of configs with which the current kernel is built.
with open(f"/boot/config-{os.uname().release}") as f:
config = f.read()
build_configs = re.findall(r"\n(CONFIG_[A-Z,0-9,_,-]+)=(?:y|m)", config)
return build_configs
def get_src_files_from_configs(unused_configs, config_to_c_file_map):
# Inefficiently figuring out which source files contain those specific configs.
unused_configs_to_file_map: Dict[str, Set[str]] = {i: set() for i in unused_configs}
for i in unused_configs:
try:
for file in config_to_c_file_map[i]:
unused_configs_to_file_map[i].add(file)
except:
pass
for i in unused_configs_to_file_map.copy().keys():
if not len(unused_configs_to_file_map[i]):
unused_configs_to_file_map.pop(i, None)
return unused_configs_to_file_map
def fish_function_defs_under_configs(unused_configs_to_file_map):
funcs = set()
prog = ProgressCounter("\nFunction capture",len(unused_configs_to_file_map.keys()),1)
config_file_func_map : Dict[ str , Dict [ str , Set[str] ]] = dict()
for config in unused_configs_to_file_map.keys():
prog.update()
files = unused_configs_to_file_map[config]
for file in files:
# Here we check for a function definition and its call within the config block of the file.
# If there is a function which is defined and called within a same config, we register it for patching.
if file[-2:] == ".h":continue
with open(file, "r", encoding=SOURCE_TREE_FILE_ENCODING) as f:
source = f.read()
xc = r"#ifdef\s+"+config+r".+?#endif"
configs = re.findall(xc, source, re.M | re.DOTALL)
string_under_config = "".join(configs)
function_defs = re.findall(
r"^(?:[a-z,0-9,_,-]+\s)+([a-z,0-9,_,-]+\s*\()",
string_under_config,
re.M,
)
for func in function_defs:
# Some filtering
if 'notrace' in func or func.startswith('_'):
continue
# Register the function under its config and file, creating nested entries as needed.
config_file_func_map.setdefault(config, {}).setdefault(file, []).append(func)
return config_file_func_map
def check_tokens_in_str(string, itr = ['static','void','int','float','double','u32',]):
# The function which can't be traced can't be patched.
if 'notrace' in string:
return False
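# Skip lines that look like control-flow statements or init-only code rather than plain function definitions.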
for i in ['if ', ' if(', 'do ', 'do {', '__init']:
if i in string:
print(f"Skipping line: {string}")
return False
for i in itr:
if i in string:return True
return False
def find_balance(string, bracket='curly'):
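# Return the net count of unmatched opening brackets in the string.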
if bracket == 'curly':return string.count('{') - string.count('}')
elif bracket == 'round':return string.count('(') - string.count(')')
else:
raise TypeError("Unknow Bracket type. Choose curly or round.")
def find_function_linums(final_map:dict,):
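# Scan each file line by line, using brace balancing to locate the start and end line numbers of every captured function.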
_tree = dict()
_src_parser_tree_abs = len( os.path.abspath(SOURCE_PARSER_LINUX_TREE) )
for config in list(final_map.keys()):
_tree[config] = dict()
for file in final_map[config]:
_genric_file_name = '[kernel_tree_root]/'+file[_src_parser_tree_abs:].lstrip('/')
_tree[config][ _genric_file_name ] = dict()
with open(file, 'r', encoding=SOURCE_TREE_FILE_ENCODING) as f:
raw = f.read()
for function in final_map[config][file]:
k = raw.split('\n')
function_found = False
balance = 0
primary_found = False
initial_linum = -1
for linum, i in enumerate(k):
if function_found and balance > 0:
primary_found = True
if function_found:
balance += find_balance(i)
if primary_found and balance == 0:
fname = os.path.abspath("linux-4.9.31-mod"+file[len('/home/samad/lp_test/build/linux-4.9.31'):])
_tree[config][ _genric_file_name ][function] = {
'start_linum': initial_linum + 1,
'end_linum': linum + 1,
}
break
continue
if function in i and check_tokens_in_str(i):
balance += find_balance(i)
initial_linum = linum
function_found = True
return _tree
def genrate_patch(tree, kp_mod_directory_tree , kp_src_dir_tree , log : progressTracker):
try:os.makedirs('kpatch-diffs/')
except Exception as err:print(f"Dir already created or {err}")
with open('kpatch-diffs/tree.json' , 'w') as fobj:
__result = json.dumps(tree, indent = 2)
fobj.write(__result)
prompt = True
print(f"Ask before genrating patch for each config (y/n):", end='')
choice = input("")
if choice == 'n':
prompt = False
# prog = ProgressCounter("\nPatch Creation",len(tree.keys()),1)
for config in tree.keys():
# prog.update()
# Skip if the patch has already been built.
if log.check(config):
print(f"Skipping building for {config} as it's already there in log.\n")
continue
if prompt:print(f"\nCreate Patch for {config} (y/n):", end='')
if prompt:choice = input("")
if prompt:
if choice == 'n':
print(f"Skipping Patch for config {config}.")
continue
else:
print(f"Genrating patch for config {config}.")
if not prompt:print(f"\nTrying to building monolithic patch for config: {config}: " , end='')
# Genrate the diffs for each file under the config.
for filename in tree[config].keys():
_actual_filename = filename.replace( '[kernel_tree_root]' , kp_mod_directory_tree.rstrip('/'))
_actual_non_mod_filename = filename.replace( '[kernel_tree_root]' , kp_src_dir_tree.rstrip('/'))
_clean_file = filename.replace( '[kernel_tree_root]' , "tmp/linux-4.9.31")
with open(_clean_file,'r') as forig:
__text = forig.read()
__file = __text.split('\n')
original_line_count = len(__file)
with open(_actual_filename,'w') as f_mod:f_mod.write(__text)
with open(_actual_non_mod_filename,'w') as f_mod:f_mod.write(__text)
for function in tree[config][filename].keys():
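# Blank out the function body in the modified copy and collapse it to an empty brace stub so the diff strips the code under this config.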
with open(_actual_filename) as fobj:file = fobj.read().split('\n')
_current_line_count = len(file)
# While replacing content, this code can only increase the file's line count, never decrease it.
start_linum = tree[config][filename][function]['start_linum'] + _current_line_count - original_line_count
end_linum = tree[config][filename][function]['end_linum'] + _current_line_count - original_line_count
if prompt:print(f"{_actual_filename}:{start_linum}")
k = '\n\n'
for i in range(start_linum-1, end_linum):
ql = file[i]
k += ql
file[i] = ''
ptr = r'\{.*\}' # The DOT is greedy on purpose.
k = re.sub(ptr, '{\n}', k, flags=re.DOTALL)
file[start_linum] = k
with open(_actual_filename, 'w') as f:f.write( '\n'.join(file) )
o = f"diff -u {_actual_non_mod_filename} {_actual_filename} > kpatch-diffs/{config}-{filename.split('/')[-1]}.patch"
diff = subprocess.call(
o,
shell=True,
)
# Run all patched files under the config with kpatch.
# print(f'CMDLINE: {KPATCH_BINARY_PATH} -t vmlinux -v {VMLINUX_PATH} -R --skip-compiler-check -s {KPATCH_SRC_DIR_TREE} -j {NUMBER_OF_CONCURRENT_MAKE_JOBS} -o kpatch_objects/ -n {config}-all.ko kpatch-diffs/{config}-*')
ret_code = subprocess.call(
[f'{KPATCH_BINARY_PATH} -t vmlinux -v {VMLINUX_PATH} -R --skip-compiler-check -s {KPATCH_SRC_DIR_TREE} -j {NUMBER_OF_CONCURRENT_MAKE_JOBS} -o kpatch_objects/ -n {config}-all.ko kpatch-diffs/{config}-*',],
shell = True,
stdout=open('/dev/null' , 'w'),
stderr=open('/dev/null' , 'w'),
)
# Building all the files together failed.
if ret_code != 0:
print(f"Failed")
# print(f"Files are: \n {tree[config].keys()}\n")
# input("Go?: ")
# Try building for each file separately.
for filename in tree[config].keys():
print(f"Trying creating a patch for {filename.split('/')[-1]} under the config {config} : " , end='')
patch = f"kpatch-diffs/{config}-{filename.split('/')[-1]}.patch"
# cmx = f'{KPATCH_BINARY_PATH} -t vmlinux -v {VMLINUX_PATH} -R --skip-compiler-check -s {KPATCH_SRC_DIR_TREE} -j {NUMBER_OF_CONCURRENT_MAKE_JOBS} -o kpatch_objects/ -n {config}-split-{filename}.ko {patch}'
# print(f"CMDLINE: \n\n {cmx} \n")
ret_code = subprocess.call(
[f'{KPATCH_BINARY_PATH} -t vmlinux -v {VMLINUX_PATH} -R --skip-compiler-check -s {KPATCH_SRC_DIR_TREE} -j {NUMBER_OF_CONCURRENT_MAKE_JOBS} -o kpatch_objects/ -n {config}-split-{filename}.ko {patch}'],
shell = True,
stdout=open('/dev/null' , 'w'),
stderr=open('/dev/null' , 'w'),
)
if ret_code:print("Failed")
else: print("Success.")
else:
print(f"Success!")
for file in tree[config].keys():
_actual_filename = file.replace( '[kernel_tree_root]' , kp_mod_directory_tree.rstrip('/'))
_actual_non_mod_filename = file.replace( '[kernel_tree_root]' , kp_src_dir_tree.rstrip('/'))
_clean_file = file.replace( '[kernel_tree_root]' , "tmp/linux-4.9.31")
with open(_clean_file,'r') as forig:
__text = forig.read()
__file = __text.split('\n')
original_line_count = len(__file)
with open(_actual_filename,'w') as f_mod:f_mod.write(__text)
with open(_actual_non_mod_filename,'w') as f_mod:f_mod.write(__text)
os.remove(f"kpatch-diffs/{config}-{file.split('/')[-1]}.patch")
# print("Removed the mod and the patch.")
log.flush()
if __name__ == "__main__":
# setup_environment()
# dep_solver = kconfDepSolver()
# traced_sources = trace_kernel()
# configs,config_to_c_file_map = parse_source(traced_sources)
# final_dep_solved_configs = dep_solver.solve_dependencies(configs)
# print(f"Total configs genrated after dependency resolution is: {len(final_dep_solved_configs)}")
# build_configs = get_current_build_configs()
# Taking the difference of the two sets.
# unused_configs = [i for i in build_configs if i not in final_dep_solved_configs]
# unused_configs_to_file_map = get_src_files_from_configs(unused_configs, config_to_c_file_map)
# final_map = fish_function_defs_under_configs(unused_configs_to_file_map)
# _tree = find_function_linums(final_map)
# original_dir_name = "linux-4.9.31"
p = progressTracker('prog_1')
t = json.load(open('tree.json'))
genrate_patch( t, KPATCH_SRC_MOD_DIR_TREE, KPATCH_SRC_DIR_TREE , p)
|
ubdussamad/kptemp
|
main.py
|
main.py
|
py
| 16,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.uname",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "re.M",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "re.DOTALL",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "re.M",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 400,
"usage_type": "call"
}
] |
39400557947
|
import os
import argparse
from typing import Tuple, Union, List, Dict, Any, Optional, Callable
import logging
import sys
import json
import pickle
import base64
import ast
from IPython.display import Image
from itertools import combinations
import operator
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer
from sklearn.inspection import permutation_importance
import numpy as np
import pandas as pd
import optuna
from optuna.trial import TrialState
import boto3
from botocore.exceptions import ClientError
# ---------------------------------- Logger ---------------------------------- #
def get_logger(name: str) -> logging.Logger:
"""
Parameters
----------
name : str
A string that specifies the name of the logger.
Returns
-------
logging.Logger
A logger with the specified name.
"""
logger = logging.getLogger(name) # Return a logger with the specified name
log_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(log_format))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
# --------------------- Parse argument from command line --------------------- #
def parser() -> argparse.ArgumentParser:
"""
Function that parses arguments from command line.
Returns
-------
argparse.ArgumentParser
An ArgumentParser object that contains the arguments passed from command line.
"""
parser = argparse.ArgumentParser()
# Optuna database
parser.add_argument('--host', type=str)
parser.add_argument('--db_name', type=str, default='optuna')
parser.add_argument('--db_secret', type=str, default='optuna/db')
parser.add_argument('--region_name', type=str, default='us-east-1')
parser.add_argument('--n_trials', type=int, default=20)
# Data, model, and output directories
parser.add_argument('--model_dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--training_env', type=str, default=json.loads(os.environ['SM_TRAINING_ENV']))
parser.add_argument('--test_mode', type=int, default=0)
return parser
# ------ Function decorator for adding additional command line arguments ----- #
def add_additional_args(parser_func: Callable, additional_args: Dict[str, type]) -> Callable:
"""
Function decorator that adds additional command line arguments to the parser.
This allows for adding additional arguments without having to change the base
parser.
Parameters
----------
parser_func : Callable
The parser function to add arguments to.
additional_args : Dict[str, type]
A dictionary where the keys are the names of the arguments and the values
are the types of the arguments, e.g. {'arg1': str, 'arg2': int}.
Returns
-------
Callable
A parser function that returns the ArgumentParser object with the additional arguments added to it.
"""
def wrapper():
# Call the original parser function to get the parser object
parser = parser_func()
for arg_name, arg_type in additional_args.items():
parser.add_argument(f'--{arg_name}', type=arg_type)
args, _ = parser.parse_known_args()
return args
return wrapper
# ----------------------- Function for database secret ----------------------- #
def get_secret(secret_name: str, region_name: str = 'us-east-1') -> Union[Dict, bytes]:
"""
Get secret from AWS Secrets Manager.
Parameters
----------
secret_name : str
Name of the secret to retrieve.
region_name : str, optional
Region, by default 'us-east-1'
Returns
-------
Union[Dict, bytes]
Secret retrieved from AWS Secrets Manager.
"""
# Create a secrets manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
try:
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# We provided an invalid value for a parameter
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# We provided a parameter value that is not valid for the current state of the resource
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# Can't find the resource that we asked for
raise e
else:
# If the secret was a JSON-encoded dictionary string, convert it to dictionary
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
secret = ast.literal_eval(secret) # Convert string to dictionary
return secret
# If the secret was binary, decode it
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return decoded_binary_secret
# --------------------- Function for setting up database --------------------- #
def get_db_url(host: str, db_name: str, db_secret: str, region_name: str = 'us-east-1') -> str:
"""
Set up database for Optuna.
Parameters
----------
host : str
Host name of the database.
db_name : str
Name of the database.
db_secret : str
Name of the secret that contains the database credentials.
region_name : str, optional
Region, by default 'us-east-1'.
Returns
-------
str
Database URL.
"""
secret = get_secret(db_secret, region_name)
connector = 'pymysql'
user_name = secret['username']
password = secret['password']
db_url = f'mysql+{connector}://{user_name}:{password}@{host}/{db_name}'
return db_url
# ------------------------- Custom log loss function ------------------------- #
def custom_log_loss(y_true: np.array, y_pred: np.array) -> float:
"""
Custom log loss function. Note that this function expects a 1-D
array for both y_true and y_pred. In the case of y_pred, the
probabilities of the positive class should be passed.
Parameters
----------
y_true : np.array
The 1-D true labels.
y_pred : np.array
The 1-D predicted probabilities of the positive class.
Returns
-------
float
The log loss.
"""
y_true = y_true.copy()
# One hot encode the true labels
y_true = np.eye(2)[y_true.astype(int)]
# Clip probabilities to avoid 1 or 0, where log loss is undefined
eps = np.finfo(y_pred.dtype).eps
y_pred = np.clip(y_pred, eps, 1 - eps)
# Compute the log loss for each class
loss_0 = - np.sum(y_true[:, 0] * np.log((1 - y_pred))) / y_true[:, 0].sum()
loss_1 = - np.sum(y_true[:, 1] * np.log(y_pred)) / y_true[:, 1].sum()
# Compute the average log loss
log_loss = (loss_0 + loss_1) / 2
return log_loss
# ----------------------- Class for feature engineering ---------------------- #
class FeatureEngine(TransformerMixin, BaseEstimator):
"""
A custom transformer that engineers new numerical features. It creates pairwise and triplet interactions between the top 5
most important features (based on impurity-based feature importance) identified using the baseline random forest
model. Next, it creates polynomial features for the top 5 most important features. Finally, it engineers new features
by taking the median, max, standard deviation, and sum of the top 5 and top 15 most important features.
"""
def __init__(self, top_5_feat: List[str], top_15_feat: List[str], cat_feat: List[str]):
"""
Constructor for the FeatureEngine class.
Parameters
----------
top_5_feat : List[str]
List of the top 5 most important features.
top_15_feat : List[str]
List of the top 15 most important features.
cat_feat : List[str]
List of categorical features.
"""
self.top_5_feat = top_5_feat
self.top_15_feat = top_15_feat
self.cat_feat = cat_feat
def fit(self, X: pd.DataFrame, y: Union[np.ndarray, pd.Series] = None):
"""
Fit the FeatureEngine transformer. This is a no-op.
Parameters
----------
X : pd.DataFrame
Data matrix.
y : Union[np.ndarray, pd.Series], optional
Ignored, present here for API consistency by convention, by default None.
Returns
-------
self: FeatureEngine
A fitted FeatureEngine transformer.
"""
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Transform the data matrix by engineering new features.
Parameters
----------
X : pd.DataFrame
Data matrix.
Returns
-------
pd.DataFrame
Transformed data matrix.
"""
X = X.copy()
# Polynomial features without interactions for top 5 features
X[[col + '_squared' for col in self.top_5_feat]] = X[self.top_5_feat].pow(2)
X[[col + '_cubed' for col in self.top_5_feat]] = X[self.top_5_feat].pow(3)
X[[col + '_sqrt' for col in self.top_5_feat]] = X[self.top_5_feat].pow(0.5)
# Math operations for the top 5 most important features
X['top_five_product'] = X[self.top_5_feat].prod(axis=1)
X['top_five_sum'] = X[self.top_5_feat].sum(axis=1)
X['top_five_median'] = X[self.top_5_feat].median(axis=1)
X['top_five_max'] = X[self.top_5_feat].max(axis=1)
X['top_five_std'] = X[self.top_5_feat].std(axis=1)
# Math operations for the top 15 most important features
X['top_fifteen_product'] = X[self.top_15_feat].prod(axis=1)
X['top_fifteen_sum'] = X[self.top_15_feat].sum(axis=1)
X['top_fifteen_max'] = X[self.top_15_feat].max(axis=1)
X['top_fifteen_median'] = X[self.top_15_feat].median(axis=1)
X['top_fifteen_std'] = X[self.top_15_feat].std(axis=1)
# Group by categorical feature and apply aggregations to the top 5 most important features
for group in self.cat_feat:
for agg_func in ['mean', 'max', 'sum']:
X[[col + f'_{agg_func}_by_{group}' for col in self.top_5_feat]] = X.groupby(group)[self.top_5_feat].transform(agg_func)
# List of tuples (col_i, col_j) for top 5 most important features
col_pairs = list(combinations(self.top_5_feat, 2))
# List of tuples (col_q, col_t, col_k) for top 5 most important features
col_triplets = list(combinations(self.top_5_feat, 3))
py_operators = {
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul
}
# Calculate the number of columns for pairwise and triplet interactions
num_pairwise_cols = len(py_operators) * len(col_pairs)
num_triplet_cols = len(py_operators) * len(col_triplets)
# Create column names for pairwise and triplet interactions
pairwise_cols = [f'{col_i}_{func_key}_{col_j}' for func_key in py_operators for col_i, col_j in col_pairs]
triplet_cols = [f'{col_q}_{func_key}_{col_t}_{func_key}_{col_k}' for func_key in py_operators for col_q, col_t, col_k in col_triplets]
# Preallocate memory for pairwise and triplet interactions
pairwise_interactions = pd.DataFrame(index=X.index, columns=pairwise_cols, dtype=float)
triplet_interactions = pd.DataFrame(index=X.index, columns=triplet_cols, dtype=float)
# Pairwise interactions for the top 5 most important features
for func_key in py_operators:
for col_i, col_j in col_pairs:
pairwise_interactions[f'{col_i}_{func_key}_{col_j}'] = py_operators[func_key](X[col_i], X[col_j])
# Triplet interactions for the top 5 most important features
for func_key in py_operators:
for col_q, col_t, col_k in col_triplets:
triplet_interactions[f'{col_q}_{func_key}_{col_t}_{func_key}_{col_k}'] = py_operators[func_key](X[col_q], py_operators[func_key](X[col_t], X[col_k]))
# Concatenate the original DataFrame with the new interaction DataFrames
X = pd.concat([X, pairwise_interactions, triplet_interactions], axis=1)
return X
# ------------------------ Function to create pipeline ----------------------- #
def create_preprocessor(top_5_feat: List[str], top_15_feat: List[str], num_feat: List[str], cat_feat: List[str]) -> Pipeline:
"""
Create a preprocessing pipeline.
Parameters
----------
top_5_feat : List[str]
List of the top 5 most important features.
top_15_feat : List[str]
List of the top 15 most important features.
num_feat : List[str]
List of numerical features.
cat_feat : List[str]
List of categorical features.
Returns
-------
Pipeline
A sklearn pipeline.
"""
# Preprocessing pipeline
preprocessor = ColumnTransformer([
('num', SimpleImputer(strategy='median'), num_feat),
('cat', OrdinalEncoder(dtype=np.int16, handle_unknown='use_encoded_value', unknown_value=-999, encoded_missing_value=-999), cat_feat)
], remainder='passthrough')
# Configure all preprocessing steps to output pandas dataframes
preprocessor.set_output(transform='pandas')
# Feature engine expects a pandas dataframe
cat_feat = [f'cat__{col}' for col in cat_feat]
pipeline = Pipeline([
('preprocessor', preprocessor),
('feature_engine', FeatureEngine(top_5_feat, top_15_feat, cat_feat))
])
return pipeline
# ------- Class for uploading and downloading dictionary to and from S3 ------ #
class FeatureImportanceHandler:
"""
Class for uploading and downloading feature importance dictionary to and from S3.
"""
def __init__(self, s3_key: str, s3_bucket: str, job_name: str, trial_number: int) -> None:
"""
Parameters
----------
s3_key : str
S3 key.
s3_bucket : str
S3 bucket.
job_name : str
Training job name to differentiate between different training jobs.
trial_number : int
Trial number to differentiate between different trials.
"""
self.s3_key = s3_key
self.s3_bucket = s3_bucket
self.job_name = job_name
self.client = boto3.client('s3')
self.trial_number = trial_number
@staticmethod
def perm_feat_imp(model_pipeline: Pipeline, scorer: Callable, n_repeats: int, val_data: Tuple[np.ndarray]) -> Dict[str, float]:
"""
Compute the permutation feature importance for a given model pipeline over n_repeats iterations.
Parameters
----------
model_pipeline : Pipeline
Model pipeline (must be fitted).
scorer : Callable
Scorer function.
n_repeats : int
Number of iterations for computing the permutation feature importance.
val_data : Tuple[np.ndarray]
Validation data.
Returns
-------
Dict[str, float]
Dictionary of feature importances where the keys are the feature names and the values are the feature importances.
"""
feature_names = model_pipeline.named_steps['rf_clf'].feature_names_in_
log_loss_scorer = make_scorer(
score_func=scorer,
greater_is_better=False, # Log loss is a loss function (minimize)
needs_proba=True
)
perm_imp_result = permutation_importance(
estimator=model_pipeline,
X=val_data[0],
y=val_data[1],
scoring=log_loss_scorer,
n_repeats=n_repeats
)
perm_imp_dict = dict(zip(feature_names, perm_imp_result['importances_mean']))
return perm_imp_dict
@staticmethod
def impure_feat_imp(model_pipeline: Pipeline) -> Dict[str, float]:
"""
Extract the impurity-based feature importance for a given model pipeline.
Parameters
----------
model_pipeline : Pipeline
Model pipeline (must be fitted).
Returns
-------
Dict[str, float]
Dictionary of feature importances where the keys are the feature names and the values are the feature importances.
"""
feature_names = model_pipeline.named_steps['rf_clf'].feature_names_in_
impurity_imp_dict = dict(zip(feature_names, model_pipeline.named_steps['rf_clf'].feature_importances_))
return impurity_imp_dict
def upload(self, dictionary: Dict[str, Any]) -> None:
"""
Upload feature dictionary to S3.
Parameters
----------
dictionary : Dict[str, Any]
Dictionary to upload.
"""
self.client.put_object(
Bucket=self.s3_bucket,
Key=f'{self.s3_key}/eda/{self.job_name}-trial-{self.trial_number}-feature-importance.pickle',
Body=pickle.dumps(dictionary)
)
return None
def download(self) -> Dict[str, Any]:
"""
Download dictionary from S3.
Returns
-------
Dict[str, Any]
Dictionary downloaded from S3.
"""
dictionary = pickle.loads(
self.client.get_object(
Bucket=self.s3_bucket,
Key=f'{self.s3_key}/eda/{self.job_name}-trial-{self.trial_number}-feature-importance.pickle'
)['Body'].read()
)
return dictionary
def __del__(self) -> None:
"""
When the object is deleted, close the boto3 s3 client.
"""
self.client.close()
return None
# ------------------------ Function for creating study ----------------------- #
def create_study(study_name: str, storage: str, direction: str = 'minimize') -> optuna.study.Study:
"""
Create Optuna study instance.
Parameters
----------
study_name : str
Name of the study.
storage : str
Database url.
direction: str
Direction of the metric--- maximize or minimize.
Returns
-------
optuna.study.Study
Optuna study instance.
"""
study = optuna.create_study(
storage=storage,
sampler=optuna.samplers.TPESampler(),
study_name=study_name,
direction=direction,
load_if_exists=True
)
return study
# ------------------- Function for reporting study results ------------------- #
def study_report(study: optuna.study.Study, logger: logging.Logger) -> None:
"""
Report study results.
Parameters
----------
study : optuna.study.Study
Optuna study instance.
logger : logging.Logger
The logger object.
"""
pruned_trials = study.get_trials(
deepcopy=False,
states=[TrialState.PRUNED]
)
complete_trials = study.get_trials(
deepcopy=False,
states=[TrialState.COMPLETE]
)
best_trial = study.best_trial
logger.info(f'Number of pruned trials: {len(pruned_trials)}')
logger.info(f'Number of complete trials: {len(complete_trials)}')
logger.info(f'Best trial score: {best_trial.value}')
logger.info(f'Best trial params: {best_trial.params}')
return None
# ---------------- Class for visualizing hyperparameter tuning --------------- #
class StudyVisualizer:
"""
Class for visualizing hyperparameter tuning via Optuna
"""
def __init__(self, study: optuna.study.Study) -> None:
"""
Parameters
----------
study : optuna.study.Study
Optuna study instance.
"""
self.study = study
# Map plot names to the corresponding Optuna visualization functions
self.plot_func_dict = {
'plot_optimization_history': optuna.visualization.plot_optimization_history,
'plot_slice': optuna.visualization.plot_slice,
'plot_parallel_coordinate': optuna.visualization.plot_parallel_coordinate,
'plot_contour': optuna.visualization.plot_contour,
'plot_param_importances': optuna.visualization.plot_param_importances
}
def _static_plot(self, plot_func: str, figsize: Tuple[float, float], **kwargs) -> Image:
"""
Create static plot.
Parameters
----------
figsize : Tuple[float, float]
Figure size.
**kwargs
Keyword arguments to pass to the plot function.
"""
fig = self.plot_func_dict[plot_func](self.study, **kwargs)
fig.update_layout(width=figsize[0], height=figsize[1])
fig_bytes = fig.to_image(format='png')
return Image(fig_bytes)
def plot_optimization_history(self, figsize: Tuple[float]) -> Image:
"""
Plot optimization history.
Parameters
----------
figsize : Tuple[float]
Figure size.
Returns
-------
Image
Image of the plot.
"""
return self._static_plot('plot_optimization_history', figsize)
def plot_param_importances(self, figsize: Tuple[float]) -> Image:
"""
Plot parameter importances.
Parameters
----------
figsize : Tuple[float]
Figure size.
Returns
-------
Image
Image of the plot.
"""
return self._static_plot('plot_param_importances', figsize)
def plot_parallel_coordinate(self, params: List[str], figsize: Tuple[float]) -> Image:
"""
Plot parallel coordinate.
Parameters
----------
params : List[str]
List of parameters to plot.
figsize : Tuple[float]
Figure size.
Returns
-------
Image
Image of the plot.
"""
return self._static_plot('plot_parallel_coordinate', figsize, params=params)
def plot_contour(self, params: List[str], figsize: Tuple[float]) -> Image:
"""
Plot contour.
Parameters
----------
params : List[str]
List of parameters to plot.
figsize : Tuple[float]
Figure size.
"""
return self._static_plot('plot_contour', figsize, params=params)
def plot_slice(self, params: List[str], figsize: Tuple[float]) -> Image:
"""
Plot slice.
Parameters
----------
params : List[str]
List of parameters to plot.
figsize : Tuple[float]
Figure size.
Returns
-------
Image
Image of the plot.
"""
return self._static_plot('plot_slice', figsize, params=params)
|
YangWu1227/python-for-machine-learning
|
tree_based/projects/age_related_conditions_sagemaker/src/custom_utils.py
|
custom_utils.py
|
py
| 23,651 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.Logger",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "typing.Callable",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "boto3.session.Session",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "ast.literal_eval",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "numpy.eye",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.finfo",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "itertools.combinations",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "operator.sub",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "operator.mul",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "sklearn.compose.ColumnTransformer",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OrdinalEncoder",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "sklearn.metrics.make_scorer",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "sklearn.inspection.permutation_importance",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "pickle.dumps",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "optuna.create_study",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "optuna.samplers.TPESampler",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "optuna.samplers",
"line_number": 547,
"usage_type": "attribute"
},
{
"api_name": "optuna.study",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "optuna.study",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "logging.Logger",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "optuna.trial.TrialState.PRUNED",
"line_number": 570,
"usage_type": "attribute"
},
{
"api_name": "optuna.trial.TrialState",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "optuna.trial.TrialState.COMPLETE",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "optuna.trial.TrialState",
"line_number": 574,
"usage_type": "name"
},
{
"api_name": "optuna.study",
"line_number": 593,
"usage_type": "attribute"
},
{
"api_name": "optuna.visualization",
"line_number": 602,
"usage_type": "attribute"
},
{
"api_name": "optuna.visualization",
"line_number": 603,
"usage_type": "attribute"
},
{
"api_name": "optuna.visualization",
"line_number": 604,
"usage_type": "attribute"
},
{
"api_name": "optuna.visualization",
"line_number": 605,
"usage_type": "attribute"
},
{
"api_name": "optuna.visualization",
"line_number": 606,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 609,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "IPython.display.Image",
"line_number": 609,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 642,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 642,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 676,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 676,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 676,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 689,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 689,
"usage_type": "name"
},
{
"api_name": "IPython.display.Image",
"line_number": 689,
"usage_type": "name"
}
] |
37558604591
|
import time
import uuid
from utils.sns_handler import SNSHandler
class OrderEventService:
def __init__(self):
self._sns = SNSHandler()
def publish_order(self, _items_complete, _total_items, _order):
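# Build the order event payload with a fresh UUID and a millisecond timestamp, then publish it to SNS.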
return self._sns.publish({
'id': str(uuid.uuid4()),
'timestamp': int(round(time.time() * 1000)),
'items': _items_complete,
'labor': _order['labor'],
'discount': _order['discount'],
'totalItems': _total_items
})
|
silassansil/simple-order-eventsourcing-cqrs-app
|
shared/service/order_event_service.py
|
order_event_service.py
|
py
| 519 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.sns_handler.SNSHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
}
] |
33227699274
|
import urllib.request
from apscheduler.schedulers.blocking import BlockingScheduler
#from cyimapp.views import modifyUbike
import datetime
sched = BlockingScheduler()
"""
@sched.scheduled_job('interval', minutes=1)
def timed_job():
print('This job is run every one minutes.')
"""
@sched.scheduled_job('cron', hour='1-23', minute='*/10')#, day_of_week='mon-fri'
def scheduled_job():
print('This job runs every 10 minutes (hours 1-23).')
# Use datetime to print the current time
print(f'{datetime.datetime.now().ctime()}')
url = 'https://cyim-finalproject.herokuapp.com/modifyUbike'
conn = urllib.request.urlopen(url)
for key, value in conn.getheaders():
print(key, value)
#print('This job is run every weekday at 5pm.')
sched.start()
|
lwyuki0524/CYIM-linebot-finalproject
|
clock.py
|
clock.py
|
py
| 752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "apscheduler.schedulers.blocking.BlockingScheduler",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 19,
"usage_type": "name"
}
] |
15077013091
|
# -*- coding: UTF-8 -*-
import xlrd
from datetime import date,datetime
#author:by Seven
# Reading a cell from an Excel sheet with xlrd returns one of 5 ctypes:
# ctype: 0 empty, 1 string, 2 number, 3 date, 4 boolean, 5 error
# Name of the file to read
rfile='test1.xlsx'
def read_excel():
wb = xlrd.open_workbook(filename=rfile)
sheet_list=wb.sheet_names()
sheet1=wb.sheet_by_index(0) # Get a sheet by index
sheet2=wb.sheet_by_name('工资') # Get a sheet by name
#print(sheet1,sheet2) # This prints the memory addresses of the two sheet objects
#print(sheet1.name,sheet1.nrows,sheet1.ncols)
#rows=sheet1.row_values(1) # Get the contents of a row
#cols=sheet1.col_values(0) # Get the contents of a column
#print(rows,cols)
#print(sheet2.name,sheet2.nrows,sheet2.ncols)
# Three ways to read the contents of a cell
# print(sheet1.cell(1,2).value) # i.e. print the contents of row 1, column 2
# print(sheet1.cell_value(1,2))
# print(sheet1.row(1)[2].value)
#print(sheet1.cell(1,2).ctype) # i.e. the ctype value
# Date values are handled with xlrd's date helper
date_value = xlrd.xldate_as_tuple(sheet1.cell_value(1,2),wb.datemode)
print(date(*date_value[:3])) # First date format
print(date(*date_value[:3]).strftime('%Y/%m/%d'))
if __name__=="__main__":
read_excel()
|
ByX54192/Common-Script
|
rxls.py
|
rxls.py
|
py
| 1,232 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "xlrd.open_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "xlrd.xldate_as_tuple",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 32,
"usage_type": "call"
}
] |
355990780
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wikipedia
import re
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.Logger import FileLogger
class Wikipedia(object):
__metaclass__ = Singleton
def __init__(self):
wikipedia.set_lang(Config().Get("DEFAULT", "CountryCode2Letter"))
def GetSummary(self, term, fallback = True, trimBrackets=True):
summary = None
try:
try:
#wikipedia.summary(query, sentences=0, chars=0, auto_suggest=True, redirect=True)
summary = wikipedia.summary(term.title(), 0, 0, False, True)
except wikipedia.exceptions.DisambiguationError as e:
FileLogger().Error("Wikipedia Line 22: DisambiguationError: {0}".format(e))
if fallback:
topics = wikipedia.search(e.options[0])
for _, topic in enumerate(topics):
summary = wikipedia.summary(topic)
break
if summary is None or len(summary) < 5:
return None
if(trimBrackets):
summary = re.sub(r"[\(\[].*?[\)\]][,.;\s]", "", summary)
return summary
except Exception as e:
FileLogger().Error("Wikipedia Line 36: Exception: {0}".format(e))
return None
def GetImages(self, term, fallback = False):
page = None
try:
page = wikipedia.WikipediaPage(term)
except:
if fallback:
topics = wikipedia.search(term)
for _, topic in enumerate(topics):
page = wikipedia.WikipediaPage(topic)
break
if page is None:
return None
return page.images
|
MaxMorgenstern/EmeraldAI
|
EmeraldAI/Logic/KnowledgeGathering/Wikipedia.py
|
Wikipedia.py
|
py
| 1,830 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "EmeraldAI.Logic.Singleton.Singleton",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "wikipedia.set_lang",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "EmeraldAI.Config.Config.Config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "wikipedia.exceptions",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "EmeraldAI.Logic.Logger.FileLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "wikipedia.search",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "EmeraldAI.Logic.Logger.FileLogger",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "wikipedia.WikipediaPage",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "wikipedia.search",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "wikipedia.WikipediaPage",
"line_number": 49,
"usage_type": "call"
}
] |
72217056188
|
#!/usr/bin/python3
"""Base Model Module """
import uuid
from datetime import datetime
class BaseModel:
"""Base Model Class
The base model class is the base for
all other classes
"""
def __init__(self, *args, **kwargs):
"""Constructor for baseModel"""
if len(kwargs) == 0:
from models import storage
self.id = str(uuid.uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
storage.new(self)
else:
for key, value in kwargs.items():
# Don't copy __class__ attribute
if key == "__class__":
continue
# Set created_at and updated_at to instances of datetime
if key in ["created_at", "updated_at"]:
self.__setattr__(key, datetime.fromisoformat(value))
continue
self.__setattr__(key, value)
def __str__(self):
"""String representation of object instance"""
return f"[{self.__class__.__name__}] ({self.id}) {self.__dict__}"
def save(self):
"""Save function
Updates the update_at instance attribute
"""
from models import storage
self.updated_at = datetime.now()
storage.save()
def to_dict(self):
"""to_dict function
Returns a dictionary containing all keys/values of
__dict__ of the instance
"""
new_dict = self.__dict__.copy()
new_dict["updated_at"] = new_dict["updated_at"].isoformat()
new_dict["created_at"] = new_dict["created_at"].isoformat()
new_dict["__class__"] = self.__class__.__name__
return new_dict
|
Ayo-Awe/AirBnB_clone
|
models/base_model.py
|
base_model.py
|
py
| 1,748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "uuid.uuid4",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.storage.new",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromisoformat",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.storage.save",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 47,
"usage_type": "name"
}
] |
15579268331
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class cbow(nn.Module):
def __init__(self, vocab_size, embedding_dim=20, padding=True):
super(cbow, self).__init__()
# num_embeddings is the number of words in your train, val and test set
# embedding_dim is the dimension of the word vectors you are using
if padding:
self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim,
padding_idx=0)
else:
self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim,
padding_idx=None)
self.linear_out = nn.Linear(in_features=embedding_dim, out_features=vocab_size, bias=False)
def forward(self, inputs):
embeds = self.embeddings(inputs)
# To not care about the order of the words we take the mean of the time dimension
means = torch.mean(embeds, dim=1)
# Softmax on output
#probs = F.log_softmax(out, dim=1)
probs = F.log_softmax(self.linear_out(means), dim=1)
return probs
|
mari756h/The_unemployed_cells
|
model/cbow.py
|
cbow.py
|
py
| 1,227 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.mean",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 28,
"usage_type": "name"
}
] |
372413184
|
import boto3
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
from datetime import datetime
import util
dynamodb = boto3.resource('dynamodb')
def lambda_handler(event, context):
# Get the incoming UserId, mochiliMembers and CognitoId
body = event["Body"]
creater_id = body["CreaterId"]
mochili_name = body["MochiliName"]
mochili_members = body["MochiliMembers"]
cognito_id = event["CognitoId"]
created_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
updated_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
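# Compose a unique mochili id from the creator id and the creation timestamp.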
mochili_id = creater_id + "--" + created_at
result = {}
# checkUser
if not (util.check_user(creater_id, cognito_id)):
return {}
try:
# Register the mochili
mochilis_table = dynamodb.Table('Mochilis')
mochilis_table.put_item(
Item={
"MochiliId": mochili_id,
"MochiliName": mochili_name,
"CreaterId": creater_id,
"UpdaterId": creater_id,
"CreatedAt": created_at,
"UpdatedAt": updated_at
},
ConditionExpression=
'attribute_not_exists(MochiliId)'
)
# Register the MochiliShares entries
mochili_shares_table = dynamodb.Table('MochiliShares')
for mochili_member_id in mochili_members:
mochili_shares_table.put_item(
Item={
"MochiliId": mochili_id,
"UserId": mochili_member_id,
"CreatedAt": created_at
},
ConditionExpression=
'attribute_not_exists(MochiliId) AND attribute_not_exists(UserId)'
)
result = {"Status": "OK",
"Detail": mochili_id}
except ClientError as clientError:
result = {"Status": clientError.response['Error']['Code'],
"Detail": str(clientError)}
# Return the result
return result
|
ryamagishi/mochili_lambda
|
postMochili/lambda_function.py
|
lambda_function.py
|
py
| 2,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.resource",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "util.check_user",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 58,
"usage_type": "name"
}
] |
30729197690
|
import json
import logging
from threading import Thread
from collections import deque
import pika
from .connect import _Connection
LOGGER = logging.getLogger(__name__)
class Publisher(Thread):
"""Multithreaded publisher.
We use a multithreaded publisher to keep the I/O loop (and heartbeat) alive and maintain a persistent connection.
This is more efficient than creating a new connection for every message.
"""
def __init__(self, config, key, exchange=None):
"""
`config`: The `klein_config.config.EnvironmentAwareConfig` containing connection details to rabbit.
`key`: The `str` key in the config with specific publisher config, these are:
```yaml
key: # i.e. upstream
queue: 'queue name' # The name of the rabbitmq queue.
create_on_connect: true # Whether to create a queue on connection.
exchange: 'exchange name' # (Optional) the name of the exchange to publish to (defaults to the default
# exchange).
exchange_type: 'direct' # (Optional) the type of exchange to consume from (e.g. 'topic', 'fanout').
# Defaults to 'direct'.
confirm_delivery: false # (Optional) toggles delivery confirmations. Defaults to true.
```
## Example
**main.py**
```python
from klein_config.config import EnvironmentAwareConfig
from klein_queue.rabbitmq.publisher import Publisher
config = EnvironmentAwareConfig() # Read from file specified with `--config`
publisher = Publisher(config, "publisher")
if __name__ == "__main__":
publisher.start() # spawns the publisher thread
publisher.add({'id': 'abc123'}) # sends a message
```
**config.yaml**
```yaml
rabbitmq:
host: [localhost]
port: 5672
username: guest
password: guest
heartbeat: 2
exchange: 'test_exchange' # You can also define an exchange here if it is used by multiple consumers.
publisher:
queue: test
create_on_connect: true
```
**terminal**
```bash
$ python main.py --config config.yaml
```
"""
self._publisher = _PublisherWorker(config, key, exchange=exchange)
self.queue = config.get(f"{format(key)}.queue", '')
super().__init__()
def run(self):
"""
Start the publisher & run it's IO loop ***within the current thread***. This will block the current thread and
is *not recommended*.
"""
self._publisher.run()
def add(self, message, properties=None, persist=True):
"""
Adds a `message` (`dict`) to the internal queue to be published with the set `properties`.
If you do not wish to persist your messages, you must explicitly set `persist` to `False`.
"""
if persist and properties is None:
properties = pika.BasicProperties(delivery_mode=2)
elif persist:
properties.delivery_mode = 2
self._publisher.publish(message, properties)
def publish(self, message, properties=None, persist=True):
"""
Adds a `message` to the internal queue - alias of `src.klein_queue.rabbitmq.publisher.Publisher.add`.
"""
self.add(message, properties, persist)
def stop(self):
"""
Stops the publisher and closes the connection to rabbit.
"""
self._publisher.threadsafe_call(self._publisher.stop)
def start(self): # pylint: disable=useless-super-delegation
"""
Start the publisher & run it's IO loop ***in a seperate thread***.
"""
super().start()
class _PublisherWorker(_Connection):
def __init__(self, config, key, exchange=None):
self._messages = deque([])
self._deliveries = []
self._acked = 0
self._nacked = 0
self._message_number = 0
self._stopping = False
self._key = key
d = config.get(f"{key}.confirm_delivery", "true")
self._confirm_delivery = d is True or (isinstance(d, str) and d.lower() in ["true", "1", "yes"])
super().__init__(config, key, exchange=exchange)
def _start_activity(self):
LOGGER.debug('Issuing consumer related RPC commands')
if self._confirm_delivery:
self.enable_delivery_confirmations()
self.schedule_next_message()
def _stop_activity(self):
self._stopping = True
self.close_channel()
self.close_connection()
def enable_delivery_confirmations(self):
LOGGER.debug('Issuing Confirm.Select RPC command')
self._channel.confirm_delivery(self.on_delivery_confirmation)
def on_delivery_confirmation(self, method_frame):
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type,
method_frame.method.delivery_tag)
if confirmation_type == 'ack':
self._acked += 1
elif confirmation_type == 'nack':
self._nacked += 1
self._deliveries.remove(method_frame.method.delivery_tag)
LOGGER.debug('Published %i messages, %i have yet to be confirmed, '
'%i were acked and %i were nacked',
self._message_number, len(self._deliveries),
self._acked, self._nacked)
def schedule_next_message(self):
if self._stopping:
return
LOGGER.debug('Scheduling next message')
self._connection.ioloop.add_callback_threadsafe(self.__publish_message)
def __publish_message(self):
if self._stopping:
LOGGER.debug(
'Publisher currently stopping, unable to publish messages at this time')
return
if not self._messages:
# no messages to publish... do nothing
self.schedule_next_message()
return
(message, properties) = self._messages.popleft()
connection = self._config.get(self._key)
if connection.has("queue"):
LOGGER.debug('Publishing message to queue %s', connection.get("queue"))
elif connection.has("exchange"):
LOGGER.debug('Publishing message to exchange %s', connection.get("exchange"))
self._channel.basic_publish(self._exchange,
connection.get("queue", ''),
json.dumps(message),
properties)
self._message_number += 1
if self._confirm_delivery:
self._deliveries.append(self._message_number)
LOGGER.debug('Published message # %i', self._message_number)
self.schedule_next_message()
def publish(self, message, properties=None):
LOGGER.debug(
'Adding message to internal stack ready for publishing')
self._messages.append((message, properties))
|
mdcatapult/py-queue
|
src/klein_queue/rabbitmq/publisher.py
|
publisher.py
|
py
| 7,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pika.BasicProperties",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "connect._Connection",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 179,
"usage_type": "call"
}
] |
477702523
|
from Predictor.Base import BaseTrainner
import torch as t
from tqdm import tqdm
import numpy as np
class Trainner(BaseTrainner):
def __init__(self, args, vocab, model, loss_func, score_func, train_loader, dev_loader, use_multi_gpu=True):
super(Trainner, self).__init__(args, vocab, model, loss_func, score_func, train_loader, dev_loader, use_multi_gpu=use_multi_gpu)
def train(self):
for epoch in range(self.args.epochs):
self.train_epoch()
self.global_epoch += 1
self.reserve_topk_model(5)
if self.summary_writer:
self.summary_writer.close()
print(f'Done')
def train_epoch(self):
for data in tqdm(self.train_loader, desc='train step'):
train_loss = self.train_inference(data)
train_loss.backward()
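# Zero the gradient of embedding row 0 (presumably the padding token) so that embedding stays fixed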
if self.use_multi_gpu:
self.model.module.encoder.embedding.weight.grad.data[0] = 0
else:
self.model.encoder.embedding.weight.grad.data[0] = 0
t.nn.utils.clip_grad_norm_(parameters=self.model.parameters(), max_norm=5.0)
self.optim.step_and_update_lr()
if self.summary_writer:
self.summary_writer.add_scalar('loss/train_loss', train_loss.item(), self.global_step)
self.summary_writer.add_scalar('lr', self.optim.current_lr, self.global_step)
self.global_step += 1
if self.global_step % self.args.eval_every_step == 0:
eval_score, eval_loss = self.evaluation()
if self.global_step % self.args.save_every_step == 0:
self.save(eval_score, eval_loss)
def evaluation(self):
losses = []
scores = []
self.model.eval()
with t.no_grad():
for data in tqdm(self.dev_loader, desc='eval_step'):
loss, score, pre, tru = self.eval_inference(data)
losses.append(loss.item())
scores.append(score)
self.write_sample_result_text(pre, tru)
eval_loss = np.mean(losses)
eval_score = np.mean(scores)
if self.summary_writer:
self.summary_writer.add_scalar('loss/eval_loss', eval_loss, self.global_step)
self.summary_writer.add_scalar('score/eval_score', eval_score, self.global_step)
if self.use_multi_gpu:
for i,v in self.model.module.named_parameters():
self.summary_writer.add_histogram(i.replace('.', '/'), v.clone().cpu().data.numpy(), self.global_step)
else:
for i,v in self.model.named_parameters():
self.summary_writer.add_histogram(i.replace('.', '/'), v.clone().cpu().data.numpy(), self.global_step)
self.model.train()
return eval_score, eval_loss
def train_inference(self, data):
context, title = [i.cuda() for i in data]
self.optim.zero_grad()
token_id, prob_vector = self.model(context, title)
loss = self.loss_func(prob_vector, title)
return loss
def eval_inference(self, data):
context, title = [i.cuda() for i in data]
token_id, prob_vector = self.model(context, title)
loss = self.loss_func(prob_vector, title)
score = self.score_func(token_id, title)
return loss, score, token_id, title
def write_sample_result_text(self, pre, tru):
token_list = pre.data.tolist()[0]
title_list = tru.data.tolist()[0]
word_list = [self.vocab.from_id_token(word) for word in token_list]
title_list = [self.vocab.from_id_token(word) for word in title_list]
word_pre = ' '.join(word_list) + '- -' + ' '.join(title_list)
self.summary_writer.add_text('pre', word_pre, global_step=self.global_step)
|
CNDPlab/ByteCup2018
|
Trainner/trainner.py
|
trainner.py
|
py
| 3,810 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "Predictor.Base.BaseTrainner",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 55,
"usage_type": "call"
}
] |
30755519335
|
from collections import Counter
import math
n = int(input())
boxes = list(map(int, input().split(' ')))
boxes = sorted(boxes, reverse=True)
c = Counter(boxes)
ans = int(c[100] + max(0, math.ceil((c[50]-c[100])/3)))
print(ans)
|
Tanguyvans/Codeforces
|
SIT/D.py
|
D.py
|
py
| 230 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.Counter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 10,
"usage_type": "call"
}
] |
72533958589
|
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
import random
import os  # used below for os.path.exists / os.mkdir
sns.set()
import pkg_resources
import types
from krx_wr_script import *
from tqdm import tqdm
from datetime import datetime
def get_state(data, t, n):
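# Build the state as the n-1 consecutive close-price differences ending at index t (left-padded with the first value when t is near the start)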
d = t - n + 1
block = data[d: t + 1] if d >= 0 else -d * [data[0]] + data[0: t + 1]
res = []
for i in range(n - 1):
res.append(block[i + 1] - block[i])
return np.array([res])
class Deep_Evolution_Strategy:
inputs = None
def __init__(
self, weights, reward_function, population_size, sigma, learning_rate
):
self.weights = weights
self.reward_function = reward_function
self.population_size = population_size
self.sigma = sigma
self.learning_rate = learning_rate
def _get_weight_from_population(self, weights, population):
weights_population = []
for index, i in enumerate(population):
jittered = self.sigma * i
weights_population.append(weights[index] + jittered)
return weights_population
def get_weights(self):
return self.weights
def train(self, epoch = 100, print_every = 1):
lasttime = time.time()
for i in tqdm(range(epoch)):
population = []
rewards = np.zeros(self.population_size)
for k in range(self.population_size):
x = []
for w in self.weights:
x.append(np.random.randn(*w.shape))
population.append(x)
for k in range(self.population_size):
weights_population = self._get_weight_from_population(
self.weights, population[k]
)
rewards[k] = self.reward_function(weights_population)
rewards = (rewards - np.mean(rewards)) / np.std(rewards)
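# Evolution-strategies update: move each weight along the reward-weighted average of its sampled perturbations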
for index, w in enumerate(self.weights):
A = np.array([p[index] for p in population])
self.weights[index] = (
w
+ self.learning_rate
/ (self.population_size * self.sigma)
* np.dot(A.T, rewards).T
)
# if (i + 1) % print_every == 0:
# print(
# 'iter %d. reward: %f'
# % (i + 1, self.reward_function(self.weights))
# )
# print('time taken to train:', time.time() - lasttime, 'seconds')
class Model:
def __init__(self, input_size, layer_size, output_size):
self.weights = [
np.random.randn(input_size, layer_size),
np.random.randn(layer_size, output_size),
np.random.randn(layer_size, 1),
np.random.randn(1, layer_size),
]
def predict(self, inputs):
feed = np.dot(inputs, self.weights[0]) + self.weights[-1]
decision = np.dot(feed, self.weights[1])
buy = np.dot(feed, self.weights[2])
return decision, buy
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
class Agent:
POPULATION_SIZE = 15
SIGMA = 0.1
LEARNING_RATE = 0.03
def __init__(self, model, money, max_buy, max_sell, window_size, close, skip):
self.model = model
self.initial_money = money
self.max_buy = max_buy
self.max_sell = max_sell
self.close = close
self.window_size = window_size
self.skip = skip
self.l = len(self.close) - 1
self.es = Deep_Evolution_Strategy(
self.model.get_weights(),
self.get_reward,
self.POPULATION_SIZE,
self.SIGMA,
self.LEARNING_RATE,
)
def act(self, sequence):
decision, buy = self.model.predict(np.array(sequence))
return np.argmax(decision[0]), int(buy[0])
def get_reward(self, weights):
initial_money = self.initial_money
starting_money = initial_money
self.model.weights = weights
close = self.close
state = get_state(close, 0, self.window_size + 1)
inventory = []
quantity = 0
for t in range(0, self.l, self.skip):
action, buy = self.act(state)
next_state = get_state(close, t + 1, self.window_size + 1)
if action == 1 and initial_money >= close[t]:
if buy < 0:
buy = 1
if buy > self.max_buy:
buy_units = self.max_buy
else:
buy_units = buy
total_buy = buy_units * close[t]
initial_money -= total_buy
inventory.append(total_buy)
quantity += buy_units
elif action == 2 and len(inventory) > 0:
if quantity > self.max_sell:
sell_units = self.max_sell
else:
sell_units = quantity
quantity -= sell_units
total_sell = sell_units * close[t]
initial_money += total_sell
state = next_state
return ((initial_money - starting_money) / starting_money) * 100
def fit(self, iterations, checkpoint):
self.es.train(iterations, print_every = checkpoint)
def buy(self, stock_name):
initial_money = self.initial_money
close = self.close
state = get_state(close, 0, self.window_size + 1)
starting_money = initial_money
states_sell = []
states_buy = []
inventory = []
quantity = 0
today_date = datetime.today().strftime("%Y%m%d")
if not os.path.exists('results/base_year/base_year_strategy_results/{}'.format(today_date)):
os.mkdir('results/base_year/base_year_strategy_results/{}'.format(today_date))
f = open('results/base_year/base_year_strategy_results/{}/{}_buy_sell_history.txt'.format(today_date, stock_name), 'a+')
for t in range(0, self.l, self.skip):
action, buy = self.act(state)
next_state = get_state(close, t + 1, self.window_size + 1)
if action == 1 and initial_money >= close[t]:
if buy < 0:
buy = 1
if buy > self.max_buy:
buy_units = self.max_buy
else:
buy_units = buy
total_buy = buy_units * close[t]
initial_money -= total_buy
inventory.append(total_buy)
quantity += buy_units
states_buy.append(t)
print(
'day %d: buy %d units at price %f, total balance %f quantity %d'
% (t, buy_units, total_buy, initial_money, quantity), file=f
)
elif action == 2 and len(inventory) > 0:
bought_price = inventory.pop(0)
if quantity > self.max_sell:
sell_units = self.max_sell
else:
sell_units = quantity
if sell_units < 1:
continue
quantity -= sell_units
total_sell = sell_units * close[t]
initial_money += total_sell
states_sell.append(t)
try:
invest = ((total_sell - bought_price) / bought_price) * 100
# invest = ((close[t] - bought_price) / 100)
except:
invest = 0
print(
'day %d, sell %d units at price %f, investment %f %%, total balance %f, quantity %d'
% (t, sell_units, total_sell, invest, initial_money, quantity), file=f
)
state = next_state
invest = ((initial_money - starting_money) / starting_money) * 100
time.sleep(0.1)
print(
'\ntotal gained %0.2f, total investment %0.2f %%'
% (initial_money - starting_money, invest) , file=f
)
plt.figure(figsize = (10, 5))
plt.plot(close, label = 'true close', c = 'g')
plt.plot(
close, 'X', label = 'predict buy', markevery = states_buy, c = 'b'
)
plt.plot(
close, 'o', label = 'predict sell', markevery = states_sell, c = 'r'
)
plt.legend()
fig_save = plt.gcf()
f.close()
return fig_save
|
YoungseokOh/Stock-prediction-toy-project
|
analysis/bot_strategy.py
|
bot_strategy.py
|
py
| 8,492 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "seaborn.set",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
}
] |
27603498320
|
import requests
import os
import json
from json import JSONDecodeError
from dotenv import load_dotenv
from loguru import logger
from requests import Response
from typing import Any
load_dotenv()
X_RAPIDAPI_KEY = os.getenv('RAPID_API_KEY')
headers = {
"X-RapidAPI-Host": "hotels4.p.rapidapi.com",
"X-RapidAPI-Key": X_RAPIDAPI_KEY
}
def get_request(url: str, headers: dict, params: dict) -> Response:
"""Функция для выполнения запроса"""
try:
return requests.get(url=url, headers=headers, params=params, timeout=30)
except requests.exceptions.RequestException as exc:
logger.exception(exc)
def request_city(city: str) -> tuple[Any, Any]:
"""Функция для запроса к API и получения данных о городе"""
url = "https://hotels4.p.rapidapi.com/locations/v2/search"
querystring = {"query": city, "locale": "ru_RU", "currency": "RUB"}
try:
request = get_request(url=url, headers=headers, params=querystring)
data = json.loads(request.text)
return (data["suggestions"][0]["entities"][0]["destinationId"],
data["suggestions"][0]["entities"][0]["name"])
except (LookupError, TypeError) as exc:
logger.exception(exc)
def parse_list(parse_list: list, uid: str, city: str, distance: str) -> list:
"""Функция для подготовки данных к записи в базу данных"""
hotels = []
hotel_id, name, adress, center, price = '', '', '', 'нет данных', ''
for hotel in parse_list:
try:
hotel_id = hotel['id']
name = hotel['name']
adress = f'{hotel["address"]["countryName"]}, {city.capitalize()},' \
f' {hotel["address"].get("postalCode", "")},' \
f' {hotel["address"].get("streetAddress", "")}'
if len(hotel['landmarks']) > 0:
if hotel['landmarks'][0]['label'] == 'Центр города':
center = hotel['landmarks'][0]['distance']
price = str(hotel['ratePlan']['price']['exactCurrent'])
user_rating = hotel.get('guestReviews', {}).get('rating', 'нет данных').replace(',', '.')
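# Skip hotels whose distance from the city centre exceeds the requested maximum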
if distance != '':
if float(distance) < float(center.split()[0].replace(',', '.')):
continue
hotels.append((uid, hotel_id, name, adress, center, price, user_rating))
except (LookupError, ValueError) as exc:
logger.exception(exc)
continue
return hotels
def request_list(id: str, list_param: list) -> list:
"""Функция для запроса к API и получения основных данных"""
url = "https://hotels4.p.rapidapi.com/properties/list"
check_in = list_param[1]
check_out = list_param[2]
sort_order = ''
landmark_ids = ''
price_min = ''
price_max = ''
page_size = list_param[4]
if list_param[6] == '/lowprice':
sort_order = 'PRICE'
elif list_param[6] == '/highprice':
sort_order = 'PRICE_HIGHEST_FIRST'
elif list_param[6] == '/bestdeal':
sort_order = 'DISTANCE_FROM_LANDMARK'
landmark_ids = 'Центр города'
price_min = list_param[7]
price_max = list_param[8]
querystring = {"destinationId": id, "pageNumber": "1", "pageSize": page_size, "checkIn": check_in,
"checkOut": check_out, "adults1": "1", "priceMin": price_min, "priceMax": price_max,
"sortOrder": sort_order, "locale": "ru_RU", "currency": "RUB",
"landmarkIds": landmark_ids}
try:
request = get_request(url=url, headers=headers, params=querystring)
data = json.loads(request.text)
parsed = parse_list(parse_list=data['data']['body']['searchResults']['results'], uid=list_param[5],
city=list_param[0], distance=list_param[9])
return parsed
except (LookupError, JSONDecodeError, TypeError) as exc:
logger.exception(exc)
def request_photo(id_hotel: str) -> list:
"""Функция для запроса к API и получения данных о фотографиях"""
url = "https://hotels4.p.rapidapi.com/properties/get-hotel-photos"
querystring = {"id": id_hotel}
photos = []
try:
response = get_request(url, headers=headers, params=querystring)
data = json.loads(response.text)
for photo in data['hotelImages']:
url = photo['baseUrl'].replace('_{size}', '_z')
photos.append((id_hotel, url))
return photos
except (JSONDecodeError, TypeError) as exc:
logger.exception(exc)
def check_foto(photo: str) -> bool:
"""Функция для проверки URL фото"""
try:
check_photo = requests.get(url=photo, timeout=30)
if check_photo.status_code == 200:
return True
except requests.exceptions.RequestException as exc:
logger.exception(exc)
|
Zaborev/hotel_search_bot
|
botrequests/hotels.py
|
hotels.py
|
py
| 5,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "loguru.logger.exception",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "requests.Response",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "loguru.logger.exception",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "loguru.logger.exception",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "loguru.logger.exception",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "loguru.logger.exception",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "loguru.logger.exception",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 126,
"usage_type": "name"
}
] |
35015744969
|
from flask import Flask, render_template, request
import scrapper as scrapper
import html
website = 'WeFashion'
def display():
app = Flask(__name__)
@app.route('/')
def index():
products = scrapper.getProducts('index','mens-footwear-special-shoes','plrty')
data = {'title':website, 'page':'Latest Products', 'products': products, 'pt':'index'}
return render_template('index.html', data=data)
@app.route('/search', methods = ['GET'])
def searchpd():
query = request.args.get('q')
order = request.args.get('o')
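# Map the sort-order code (rec / plth / phtl / dhtl) to a human-readable label for the page title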
txt = ''
if order == 'rec':
txt = 'Fresh Arrived'
elif order == 'plth':
txt = 'Low Priced'
elif order == 'phtl':
txt = 'High Priced'
elif order == 'dhtl':
txt = 'Discounted'
else:
txt = "Popular"
products = scrapper.getProducts('search',query,order)
data = {'title':website, 'page':'Search results for '+str(query)+ ' - ' +str(txt), 'products': products, 'pt':'search', 'purl':'/search?q='+str(query)}
return render_template('index.html', data=data)
@app.route('/cat/<query>/<order>')
def listpd(query,order):
if order == 'rec':
txt = 'Fresh Arrived'
elif order == 'plth':
txt = 'Low Priced'
elif order == 'phtl':
txt = 'High Priced'
elif order == 'dhtl':
txt = 'Discounted'
else:
txt = "Popular"
products = scrapper.getProducts('cat',query,order)
data = {'title':website, 'page':str(txt)+' '+str(query)+' You might like', 'products': products, 'pt':'cat', 'purl':'/cat/'+str(query)}
return render_template('index.html', data=data)
@app.route('/product/<query>/<rc>')
def getpd(query,rc):
query = query + '/' + rc
#products = scrapper.getProducts('product','mens-footwear-special-shoes',1)
products = scrapper.getProducts('product',query,1)
data = {'title':website, 'page':products['p_title'], 'products': products, 'pt':'product', 'query': query}
return render_template('product.html', data=data, html=html)
app.run()
|
sameerkhanal209/SnapDealScrapper
|
website.py
|
website.py
|
py
| 2,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scrapper.getProducts",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "scrapper.getProducts",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scrapper.getProducts",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "scrapper.getProducts",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 68,
"usage_type": "call"
}
] |
21936779401
|
import csv
import os
import argparse
from pathlib import Path
import torch
from transformers import BertTokenizer
from dataset import max_seq_length
def read_ag_news_split(filepath, n=-1):
"""Generate AG News examples."""
texts = []
labels = []
with open(filepath, encoding="utf-8") as csv_file:
csv_reader = csv.reader(
csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
)
for id_, row in enumerate(csv_reader):
label, title, description = row
# Original labels are [1, 2, 3, 4] ->
# ['World', 'Sports', 'Business', 'Sci/Tech']
# Re-map to [0, 1, 2, 3].
label = int(label) - 1
text = " ".join((title, description))
labels.append(label)
texts.append(text)
#yield id_, {"text": text, "label": label}
return texts, labels
def process_and_cache_data_ag(args, data_type = 'train'):
data_path = 'data/ag'
print('Read_data...')
texts, labels = read_ag_news_split(os.path.join(data_path, data_type + '.csv'), n=-1)
print(f"Number of texts: {len(texts)}, number of labels: {len(labels)}")
print('Tokenize...')
tokenizer = BertTokenizer.from_pretrained(args.model_type)
encodings = tokenizer(texts, truncation=True, max_length=max_seq_length[args.task], padding=True)
encodings['labels'] = labels
cached_features_file = os.path.join(data_path, 'cached_{}_{}_{}_{}'.format(
data_type,
list(filter(None, args.model_type.split('/'))).pop(),
str(max_seq_length[args.task]),
str(args.task)))
print("Saving features into cached file", cached_features_file)
torch.save(encodings, cached_features_file)
def read_imdb_split(split_dir, n=-1):
split_dir = Path(split_dir)
texts = []
labels = []
for label_dir in ["pos", "neg"]:
for i, text_file in enumerate((split_dir/label_dir).iterdir()):
if n != -1:
if i>= (n // 2):
break
else:
texts.append(text_file.read_text())
labels.append(0 if label_dir=="neg" else 1)
return texts, labels
def process_and_cache_data_imdb(args, data_type = 'train'):
data_path = 'data/imdb'
print('Read_data...')
texts, labels = read_imdb_split(os.path.join(data_path, data_type), n=-1)
print(f"Number of texts: {len(texts)}, number of labels: {len(labels)}")
print('Tokenize...')
tokenizer = BertTokenizer.from_pretrained(args.model_type)
encodings = tokenizer(texts, truncation=True, max_length=max_seq_length[args.task], padding=True)
encodings['labels'] = labels
cached_features_file = os.path.join(data_path, 'cached_{}_{}_{}_{}'.format(
data_type,
list(filter(None, args.model_type.split('/'))).pop(),
str(max_seq_length[args.task]),
str(args.task)))
print("Saving features into cached file", cached_features_file)
torch.save(encodings, cached_features_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="ag")
parser.add_argument("--model_type", type=str, default='bert-base-uncased')
args = parser.parse_args()
data_types = ['train' , 'test']
for data_type in data_types:
print(f"Proceesing {args.task} data - {data_type} set")
if args.task == 'ag':
process_and_cache_data_ag(args, data_type)
elif args.task == 'imdb':
process_and_cache_data_imdb(args, data_type)
|
bracha-laufer/pareto-testing
|
data_utils/process_data.py
|
process_data.py
|
py
| 3,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_ALL",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "dataset.max_seq_length",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "dataset.max_seq_length",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "dataset.max_seq_length",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "dataset.max_seq_length",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 91,
"usage_type": "call"
}
] |
72532173629
|
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=protected-access
# pylint:disable=not-context-manager
from typing import Iterator
import pytest
import respx
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pytest_simcore.helpers.typing_env import EnvVarsDict
from respx.router import MockRouter
from simcore_service_catalog.api.dependencies.director import get_director_api
from simcore_service_catalog.core.application import init_app
from simcore_service_catalog.services.director import DirectorApi
@pytest.fixture
def minimal_app(
monkeypatch: pytest.MonkeyPatch, service_test_environ: EnvVarsDict
) -> Iterator[FastAPI]:
# disable a couple of subsystems
monkeypatch.setenv("CATALOG_POSTGRES", "null")
monkeypatch.setenv("SC_BOOT_MODE", "local-development")
app = init_app()
yield app
@pytest.fixture()
def client(minimal_app: FastAPI) -> Iterator[TestClient]:
# NOTE: this way we ensure the events are run in the application
# since it starts the app on a test server
with TestClient(minimal_app) as client:
yield client
@pytest.fixture
def mocked_director_service_api(minimal_app: FastAPI) -> Iterator[MockRouter]:
with respx.mock(
base_url=minimal_app.state.settings.CATALOG_DIRECTOR.base_url,
assert_all_called=False,
assert_all_mocked=True,
) as respx_mock:
respx_mock.head("/", name="healthcheck").respond(200, json={"health": "OK"})
respx_mock.get("/services", name="list_services").respond(
200, json={"data": ["one", "two"]}
)
yield respx_mock
async def test_director_client_setup(
mocked_director_service_api: MockRouter,
minimal_app: FastAPI,
client: TestClient,
):
# gets director client as used in handlers
director_api = get_director_api(minimal_app)
assert minimal_app.state.director_api == director_api
assert isinstance(director_api, DirectorApi)
# use it
data = await director_api.get("/services")
# director entry-point has been hit
assert mocked_director_service_api["list_services"].called
# returns un-enveloped response
assert data == ["one", "two"]
|
ITISFoundation/osparc-simcore
|
services/catalog/tests/unit/test_services_director.py
|
test_services_director.py
|
py
| 2,261 |
python
|
en
|
code
| 35 |
github-code
|
6
|
[
{
"api_name": "pytest.MonkeyPatch",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pytest_simcore.helpers.typing_env.EnvVarsDict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "simcore_service_catalog.core.application.init_app",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterator",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "typing.Iterator",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "respx.mock",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterator",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "respx.router.MockRouter",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "respx.router.MockRouter",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "simcore_service_catalog.api.dependencies.director.get_director_api",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "simcore_service_catalog.services.director.DirectorApi",
"line_number": 66,
"usage_type": "argument"
}
] |
43344923713
|
# coding=utf-8
__author__ = 'Boris Tsema'
import time
import cPickle
from collections import defaultdict
import json
import re
import numpy as np
from twisted.internet import defer
from twisted.python import log
from gorynych.common.infrastructure.persistence import np_as_text
from gorynych.common.infrastructure import persistence as pe
from gorynych.common.exceptions import NoAggregate
from gorynych.processor.domain import track
class PickledTrackRepository(object):
def save(self, data):
f = open('track_repo', 'wb')
cPickle.dump(data, f, -1)
f.close()
NEW_TRACK = """
INSERT INTO track (start_time, end_time, track_type, track_id)
VALUES (%s, %s, (SELECT id FROM track_type WHERE name=%s), %s)
RETURNING ID;
"""
INSERT_SNAPSHOT = """
INSERT INTO track_snapshot (timestamp, id, snapshot) VALUES(%s, %s, %s)
"""
def find_aftertasks_snapshots(data):
'''
@param data:
@type data: L{gorynych.processor.domain.track.Track}
@return: dict with timestamp as a key and state set as a value.
@rtype: C{dict}
'''
result = defaultdict(set)
state = data._state
# Every track is in air from it's first point by default.
# TODO: change it someday.
if len(data.points) == 0:
return result
result[int(data.points['timestamp'][0])].add('in_air_true')
if not state.in_air and state.in_air_changed:
result[int(state.in_air_changed)].add('in_air_false')
if state.state == 'finished':
result[int(state.statechanged_at)].add('finished')
if state.finish_time:
result[int(state.finish_time)].add('es_taken')
if state.start_time:
result[int(state.start_time)].add('started')
return result
def get_states_from_events(obj):
'''
Read states in track and create corresponding snapshots.
@param obj:
@type obj: L{gorynych.processor.domain.track.Track}
@return: dict with timestamp as a key and state set as a value.
@rtype: C{dict}
'''
result = defaultdict(set)
lookedup_events = ['TrackStarted', 'TrackFinished',
'TrackFinishTimeReceived', 'TrackLanded', 'TrackInAir']
map_event = dict(TrackStarted='started',
TrackFinished='finished',
TrackFinishTimeReceived='es_taken',
TrackLanded='in_air_false',
TrackInAir='in_air_true')
if len(obj.changes) == 0:
return result
for ev in obj.changes:
if ev.name in lookedup_events:
result[ev.occured_on].add(map_event[ev.name])
return result
class TrackRepository(object):
duplicate_key_ts = r'Key.*\((\d*)\,.*already exists'
def __init__(self, pool):
self.pool = pool
@defer.inlineCallbacks
def get_by_id(self, id):
data = yield self.pool.runQuery(pe.select('track'), (str(id),))
if not data:
raise NoAggregate("%s %s" % ('Track', id))
track_id, _id = data[0]
tid = track.TrackID.fromstring(track_id)
event_list = yield pe.event_store().load_events(tid)
result = track.Track(tid, event_list)
result._id = long(_id)
defer.returnValue(result)
def save(self, obj):
def handle_Failure(failure):
log.err(failure)
return obj.reset()
d = defer.succeed(1)
if obj.changes:
d.addCallback(lambda _: pe.event_store().persist(obj.changes))
if not obj._id:
d.addCallback(lambda _: self.pool.runInteraction(self._save_new,
obj))
else:
d.addCallback(lambda _: self.pool.runWithConnection(self._update,
obj))
d.addCallback(self._update_times)
d.addCallback(self._save_snapshots)
d.addCallback(lambda obj: obj.reset())
d.addErrback(handle_Failure)
return d
def _save_new(self, cur, obj):
cur.execute(NEW_TRACK, (obj._state.start_time, obj._state.end_time,
obj.type.type, str(obj.id)))
dbid = cur.fetchone()[0]
log.msg("New track inserted %s and its id %s" % (obj.id, dbid))
if len(obj.points) > 0:
points = obj.points
points['id'] = np.ones(len(points)) * dbid
data = np_as_text(points)
try:
cur.copy_expert("COPY track_data FROM STDIN ", data)
except Exception as e:
log.err("Exception occured on inserting points: %r" % e)
obj.buffer = np.empty(0, dtype=track.DTYPE)
obj._id = dbid
return obj
@defer.inlineCallbacks
def _save_snapshots(self, obj):
'''
@param obj: track object
@type obj: L{gorynych.processor.domain.track.Track}
@return:
@rtype: L{gorynych.processor.domain.track.Track}
'''
snaps = get_states_from_events(obj)
for snap in snaps:
try:
yield self.pool.runOperation(INSERT_SNAPSHOT,
(snap, obj._id, json.dumps(list(snaps[snap]))))
except Exception as e:
log.err("Error while inserting snapshot %s:%s for track %s: "
"%r" %
(snap, snaps[snap], obj._id, e))
defer.returnValue(obj)
def _update(self, con, obj):
if len(obj.points) == 0:
return obj
tdiff = int(time.time()) - obj.points[0]['timestamp']
log.msg("Save %s points for track %s" % (len(obj.points), obj._id))
log.msg("First points for track %s was %s second ago." % (obj._id,
tdiff))
def try_insert_points(points):
data = np_as_text(points)
cur = con._connection.cursor()
cur.copy_expert("COPY track_data FROM STDIN ", data)
points = obj.points
points['id'] = np.ones(len(points)) * obj._id
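# Retry the COPY, dropping rows whose timestamp already exists (Postgres unique-violation 23505), until it succeeds or no points remain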
while True:
try:
try_insert_points(points)
break
except Exception as e:
if e.pgcode == '23505':
dup_tc = re.findall(self.duplicate_key_ts, e.message)
if not dup_tc:
break
dup_tc = int(dup_tc[0])
idx = np.where(points['timestamp'] != dup_tc)
points = points[idx]
if len(points) == 0:
break
con._connection.rollback()
else:
log.err("Error occured while COPY data on update for track %s: "
"%r" % (obj._id, e))
obj.buffer = np.empty(0, dtype=track.DTYPE)
return obj
def _update_times(self, obj):
d = defer.succeed(1)
for idx, item in enumerate(obj.changes):
if item.name == 'TrackStarted':
t = obj._state.start_time
d.addCallback(lambda _:self.pool.runOperation(
"UPDATE track SET start_time=%s WHERE ID=%s", (t,
obj._id)))
if item.name == 'TrackEnded':
t = obj._state.end_time
d.addCallback(lambda _:self.pool.runOperation(
"UPDATE track SET end_time=%s WHERE ID=%s",
(t, obj._id)))
d.addCallback(lambda _:obj)
return d
|
DmitryLoki/gorynych
|
gorynych/processor/infrastructure/persistence.py
|
persistence.py
|
py
| 7,403 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "cPickle.dump",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "gorynych.common.infrastructure.persistence.select",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "gorynych.common.infrastructure.persistence",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "gorynych.common.exceptions.NoAggregate",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "gorynych.processor.domain.track.TrackID.fromstring",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "gorynych.processor.domain.track.TrackID",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "gorynych.processor.domain.track",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "gorynych.common.infrastructure.persistence.event_store",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "gorynych.common.infrastructure.persistence",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "gorynych.processor.domain.track.Track",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "gorynych.processor.domain.track",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.returnValue",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.defer",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "twisted.python.log.err",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.succeed",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "gorynych.common.infrastructure.persistence.event_store",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "gorynych.common.infrastructure.persistence",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "twisted.python.log.msg",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "gorynych.common.infrastructure.persistence.np_as_text",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "twisted.python.log.err",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "gorynych.processor.domain.track.DTYPE",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "gorynych.processor.domain.track",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "twisted.python.log.err",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.returnValue",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.defer",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "twisted.python.log.msg",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "twisted.python.log.msg",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "gorynych.common.infrastructure.persistence.np_as_text",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "twisted.python.log.err",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "gorynych.processor.domain.track.DTYPE",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "gorynych.processor.domain.track",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.succeed",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 203,
"usage_type": "name"
}
] |
29553788733
|
from __future__ import absolute_import, division, print_function, unicode_literals
from .compat import test_cycle
from .schema import ensure_schema
def build_dags(schema, dag_class=None, operator_class=None, sensor_class=None):
"""
:param dict schema: Airflow declarative DAGs schema.
:param dag_class: DAG class. When not specified, the ``airflow.models.DAG``
get used via implicit import.
:param type operator_class: Airflow operator class.
:param type sensor_class: Airflow sensor class.
:rtype: list[airflow.models.DAG]
"""
schema = ensure_schema(schema)
# We use implicit airflow imports by following reasons:
# 1. Airflow project get renamed recently to apache-airflow, so we couldn't
# have (yet) stable dependency on it without conflicts.
# 2. We put the first stone here provide support for custom scheduler and
# worker implementations.
#
if dag_class is None: # pragma: no cover
from airflow import DAG as dag_class
if operator_class is None: # pragma: no cover
from .operators import GenericOperator as operator_class
if sensor_class is None: # pragma: no cover
from .operators import GenericSensor as sensor_class
return [
build_dag(
dag_id,
dag_schema,
dag_class=dag_class,
operator_class=operator_class,
sensor_class=sensor_class,
)
for dag_id, dag_schema in schema["dags"].items()
]
def build_dag(dag_id, schema, dag_class, operator_class, sensor_class):
"""
:param str dag_id: DAG ID.
:param dict schema: DAG definition schema.
:param dag_class: DAG class.
:param type operator_class: Airflow operator class.
:param type sensor_class: Airflow sensor class.
:rtype: airflow.models.DAG
"""
dag = dag_class(dag_id=dag_id, **schema.get("args", {}))
sensors = {
sensor_id: build_sensor(
dag, sensor_id, sensor_schema, sensor_class=sensor_class
)
for sensor_id, sensor_schema in schema.get("sensors", {}).items()
}
operators = {
operator_id: build_operator(
dag, operator_id, operator_schema, operator_class=operator_class
)
for operator_id, operator_schema in schema.get("operators", {}).items()
}
duplicates = set(sensors) & set(operators)
if duplicates:
raise RuntimeError(
"Tasks: %s - are both sensors and operators" % ", ".join(duplicates)
)
build_flow(dict(operators, **sensors), schema.get("flow", {}))
test_cycle(dag)
return dag
def build_sensor(dag, sensor_id, sensor_schema, sensor_class):
"""
:param DAG dag: Airflow DAG instance.
:param str sensor_id: Sensor ID.
:param dict sensor_schema: Sensor definition schema.
:param type sensor_class: Airflow sensor class.
:rtype: airflow.operators.sensors.BaseSensorOperator
"""
return build_task(dag, sensor_id, sensor_schema, task_class=sensor_class)
def build_operator(dag, operator_id, operator_schema, operator_class):
"""
:param DAG dag: Airflow DAG instance.
:param str operator_id: Operator ID.
:param dict operator_schema: Operator definition schema.
:param type operator_class: Airflow operator class.
:rtype: airflow.operators.BaseOperator
"""
return build_task(dag, operator_id, operator_schema, task_class=operator_class)
def build_task(dag, task_id, schema, task_class):
"""
:param airflow.models.DAG dag: DAG object instance.
:param str task_id: Task ID.
:param dict schema: Task schema.
:param type task_class: Airflow operator class.
:rtype: airflow.operators.BaseOperator
"""
args = schema.get("args", {})
callback = schema.get("callback", None)
if callback is not None:
callback_args = schema.get("callback_args", {})
return task_class(
_callback=callback,
_callback_args=callback_args,
task_id=task_id,
dag=dag,
**args
)
task_class = schema.get("class", None) # type: type
if task_class is not None:
return task_class(task_id=task_id, dag=dag, **args)
# Basically, you cannot reach here - schema validation should prevent this.
# But in case if you're lucky here is your exception.
raise RuntimeError(
"nothing to do with %s: %s" % (task_id, schema)
) # pragma: no cover
def build_flow(tasks, schema):
"""
:param dict tasks: Tasks mapping by their ID.
:param dict schema: Flow schema.
"""
for task_id, downstream_ids in schema.items():
try:
task = tasks[task_id]
except KeyError:
raise RuntimeError("unknown task `%s` in flow" % task_id)
else:
downstream_tasks = []
for downstream_idx in downstream_ids:
try:
downstream_tasks.append(tasks[downstream_idx])
except KeyError:
raise RuntimeError(
"unknown downstream task `%s` for %s"
"" % (downstream_idx, task_id)
)
task.set_downstream(downstream_tasks)
|
rambler-digital-solutions/airflow-declarative
|
src/airflow_declarative/builder.py
|
builder.py
|
py
| 5,284 |
python
|
en
|
code
| 128 |
github-code
|
6
|
[
{
"api_name": "schema.ensure_schema",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "operators.GenericOperator",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "operators.GenericSensor",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "airflow.DAG",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "schema.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "operators.GenericSensor",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "schema.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "operators.GenericOperator",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "schema.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "schema.get",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "compat.test_cycle",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "operators.GenericSensor",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "operators.GenericOperator",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "schema.get",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "schema.get",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "schema.get",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "schema.get",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "schema.items",
"line_number": 140,
"usage_type": "call"
}
] |
36096958231
|
import numpy as np
from sklearn import datasets
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from .base import BaseAlgorithm
class NaiveBayes(BaseAlgorithm):
def __init__(self):
self._classes = None
self._mean = None
self._var = None
self._priors = None
def fit(self, X, y):
n_samples, n_features = X.shape
self._classes = np.unique(y)
n_classes = len(self._classes)
self._mean = np.zeros((n_classes, n_features), dtype=np.float64)
self._var = np.zeros((n_classes, n_features), dtype=np.float64)
self._priors = np.zeros(n_classes, dtype=np.float64)
for idx, c in enumerate(self._classes):
X_c = X[y == c]
self._mean[idx, :] = X_c.mean(axis=0)
self._var[idx, :] = X_c.var(axis=0)
self._priors[idx] = X_c.shape[0] / float(n_samples)
def predict(self, X):
y_pred = [self._predict(x) for x in X]
return np.array(y_pred)
def _predict(self, x):
posteriors = []
for idx, c in enumerate(self._classes):
prior = np.log(self._priors[idx])
posterior = np.sum(np.log(self._pdf(idx, x)))
posterior = prior + posterior
posteriors.append(posterior)
return self._classes[np.argmax(posteriors)]
def _pdf(self, class_idx, x):
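# Gaussian probability density of x under the per-class mean and variance (the naive Bayes likelihood term)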
mean = self._mean[class_idx]
var = self._var[class_idx]
numerator = np.exp(- (x - mean) ** 2 / (2 * var))
denominator = np.sqrt(2 * np.pi * var)
return numerator / denominator
# Testing
if __name__ == "__main__":
X, y = datasets.make_classification(n_samples=1000, n_features=10, n_classes=2, random_state=123)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf = NaiveBayes()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(f"Accuracy: {accuracy_score(predictions, y_test)}")
print(f"Report: {classification_report(y_test, predictions)}")
|
janaSunrise/ML-algorithms-from-scratch
|
algorithms/naive_bayes.py
|
naive_bayes.py
|
py
| 2,115 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "base.BaseAlgorithm",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.unique",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 69,
"usage_type": "call"
}
] |
9392771404
|
import datetime
import json
import os
from subprocess import Popen, PIPE, STDOUT
from time import clock
from flask import Flask, request #, session, g, redirect, url_for, abort, render_template, flash
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
SQLALCHEMY_DATABASE_URI='sqlite:///%s' % os.path.join(app.root_path, 'restty.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
SECRET_KEY='development_key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('RESTTY_SETTINGS', silent=True)
db = SQLAlchemy(app)
class Command(db.Model):
id = db.Column(db.Integer, primary_key=True)
command_name = db.Column(db.String)
args = db.Column(db.String, nullable=True)
start_time = db.Column(db.DateTime)
execution_time = db.Column(db.Integer)
result = db.Column(db.Unicode)
return_code = db.Column(db.Integer)
def __init__(self, command_name, args=None):
self.command_name = command_name
self.args = args
self.execute()
def __repr__(self):
return 'Command(command_name=%r,args=%r)' % (self.command_name, self.args)
def execute(self):
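# Run the command in a shell, capturing combined stdout/stderr, the return code and the wall-clock execution time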
def _exec():
args = self.command_name
if self.args is not None:
args += ' ' + self.args
start = clock()
process = Popen(args, stdout=PIPE, stderr=STDOUT, shell=True)
stdout, _ = process.communicate()
process.wait()
end = clock()
return stdout, process.returncode, end - start
self.start_time = datetime.datetime.now()
stdout, code, time = _exec()
self.execution_time = time
self.return_code = code
self.result = unicode(stdout, encoding='utf-8')
@app.route('/')
def index():
return str(Command.query.count())
@app.route('/exec', methods=['GET'])
def run():
c = request.args.get('c')
if ' ' in c:
command_name, _, args = c.partition(' ')
command = Command(command_name, args)
else:
command = Command(c)
db.session.add(command)
db.session.commit()
return json.dumps({'id': command.id, 'status': command.return_code, 'result': command.result})
@app.route('/history', methods=['GET'])
def history():
return json.dumps([c.command_name for c in Command.query.order_by(db.desc('start_time')).all()])
if __name__ == '__main__':
db.drop_all()
db.create_all()
app.run()
|
will2dye4/restty
|
restty.py
|
restty.py
|
py
| 2,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "time.clock",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 81,
"usage_type": "call"
}
] |
71357258427
|
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import multiprocessing
import logging
from setting import news_file, word2vec_model_path, word2vec_vectors_path
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(lineno)d - %(message)s')
def word2vec_train():
"""
Word vector training
:param train_file:
:param save_model_file:
:param save_vector_file:
:return:
"""
f_wiki = open(news_file, "r", encoding="utf-8")
sentences = LineSentence(f_wiki)
model = Word2Vec(sentences, size = 100, window = 5, min_count = 7, workers = multiprocessing.cpu_count())
model.save(word2vec_model_path)
model.wv.save_word2vec_format(word2vec_vectors_path, binary = False)
def load_model(fname):
"""
Load a trained model
:param fname:
:return:
"""
return Word2Vec.load(fname)
if __name__ == "__main__":
# Train the model
word2vec_train()
# Load the trained model
model = Word2Vec.load(word2vec_model_path)
# Word vectors
# print("江西", word2vec_model['江西']) # get the word vector
print(model.most_similar('江西'))
|
huangmgithub/Automatic_Summarization
|
bin/build_word2vec.py
|
build_word2vec.py
|
py
| 1,131 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "setting.news_file",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "gensim.models.word2vec.LineSentence",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "setting.word2vec_model_path",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "setting.word2vec_vectors_path",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "setting.word2vec_model_path",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 35,
"usage_type": "name"
}
] |
124077703
|
# @Time : 2023/4/2 22:49
# @Author : tk
# @FileName: infer
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
import torch
from deep_training.data_helper import ModelArguments
from transformers import HfArgumentParser
from data_utils import train_info_args, NN_DataHelper, get_deepspeed_config,build_template
from aigc_zoo.model_zoo.rwkv4.llm_model import MyTransformer, RwkvConfig,set_model_profile
from aigc_zoo.utils.rwkv4_generate import Generate
deep_config = get_deepspeed_config()
if __name__ == '__main__':
parser = HfArgumentParser((ModelArguments,))
(model_args,) = parser.parse_dict(train_info_args, allow_extra_keys=True)
# RWKV_T_MAX (the maximum inference length) can be adjusted here
set_model_profile(RWKV_T_MAX=2048, RWKV_FLOAT_MODE='')
dataHelper = NN_DataHelper(model_args)
tokenizer, _, _,_= dataHelper.load_tokenizer_and_config()
config = RwkvConfig.from_pretrained('./best_ckpt')
pl_model = MyTransformer(config=config, model_args=model_args,torch_dtype=torch.float16)
# Convert deepspeed weights with the conversion script commands below
# Usually pick the newest weight folder when sorted by time
# cd best_ckpt/last
# python zero_to_fp32.py . ../last.ckpt
train_weight = './best_ckpt/last.ckpt'
pl_model.load_sft_weight(train_weight,strict=True)
# Save HF weights
# config.save_pretrained('convert/')
# Save the sft p-tuning-v2 weights
# pl_model.save_sft_weight('convert/pytorch_model_sft_ptv2.bin')
# Save the sft weights
# pl_model.save_sft_weight('convert/pytorch_model_sft.bin')
model = pl_model.get_llm_model()
model.eval().half().cuda()
text_list = ["写一个诗歌,关于冬天",
"晚上睡不着应该怎么办",
"从南京到上海的路线",
]
for input in text_list:
query = build_template(input)
response = Generate.generate(model, query=query, tokenizer=tokenizer, max_length=512,
eos_token_id=config.eos_token_id,
pad_token_id=config.eos_token_id,
do_sample=True, top_p=0.85, temperature=1.0, )
print('input',input)
print('output',response)
|
ssbuild/rwkv_finetuning
|
infer/infer_finetuning.py
|
infer_finetuning.py
|
py
| 2,357 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "data_utils.get_deepspeed_config",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "transformers.HfArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "deep_training.data_helper.ModelArguments",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "data_utils.train_info_args",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "aigc_zoo.model_zoo.rwkv4.llm_model.set_model_profile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "data_utils.NN_DataHelper",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "aigc_zoo.model_zoo.rwkv4.llm_model.RwkvConfig.from_pretrained",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "aigc_zoo.model_zoo.rwkv4.llm_model.RwkvConfig",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "aigc_zoo.model_zoo.rwkv4.llm_model.MyTransformer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.float16",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "data_utils.build_template",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "aigc_zoo.utils.rwkv4_generate.Generate.generate",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "aigc_zoo.utils.rwkv4_generate.Generate",
"line_number": 60,
"usage_type": "name"
}
] |
16919929356
|
import json
from pydantic import parse_obj_as
from abc import ABC, abstractmethod
from typing import Any
from aiober.methods.base import Response
from .viber import ViberAPIServer, PRODUCTION
DEFAULT_TIMEOUT: float = 60.0
class BaseSession(ABC):
def __init__(self):
self.api: ViberAPIServer = PRODUCTION
self.timeout = DEFAULT_TIMEOUT
@abstractmethod
async def make_request(self, bot, timeout: int = None):
pass
def check_response(self, bot, status_code: int, content: str) -> Response:
try:
json_data = json.loads(content)
except Exception as E:
raise ValueError("failed to decode object") from E
print(json_data)
response = parse_obj_as(Response, json_data)
if 200 <= status_code <= 220:
return response
raise RuntimeError(f'status code {status_code}')
|
CodeCraftStudio-Family/aioviber
|
aiober/client/session/base.py
|
base.py
|
py
| 912 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "abc.ABC",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "viber.ViberAPIServer",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "viber.PRODUCTION",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pydantic.parse_obj_as",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "aiober.methods.base.Response",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "aiober.methods.base.Response",
"line_number": 23,
"usage_type": "name"
}
] |
24794202203
|
#!/usr/bin/env python3
import sys, re, argparse
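# Matches MCNP's "<number> <count>R" repeat shorthand so it can be expanded into explicit repeats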
R = re.compile("(?P<number>\d+)\s+(?P<repeat>\d+)R")
def main():
"""
mcnp2phits - converts *some* parts of MCNP deck into PHITS format
"""
parser = argparse.ArgumentParser(description=main.__doc__,
epilog="Homepage: https://github.com/kbat/mc-tools")
parser.add_argument("mcnp", type=str,
help="MCNP file name")
parser.add_argument("phits", type=str,
help="PHITS file name")
args = parser.parse_args()
phits = open(args.phits, 'w')
mcnp = open(args.mcnp, 'r')
for line in mcnp.readlines():
s = R.search(line)
if s:
print(re.sub(R,
lambda m: (m.group('number')+" ")*(int(m.group('repeat'))+1),
line.rstrip()))
else:
print(line.rstrip())
if __name__ == "__main__":
sys.exit(main())
|
kbat/mc-tools
|
mctools/phits/mcnp2phits.py
|
mcnp2phits.py
|
py
| 955 |
python
|
en
|
code
| 38 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 35,
"usage_type": "call"
}
] |
19981907937
|
import re
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import requests
def get_data():
ua = UserAgent()
fake_ua = {'user-agent': ua.random}
url = 'https://tury.ru/hotel/'
req = requests.get(url=url, headers=fake_ua)
response = req.text
soup = BeautifulSoup(response, 'lxml')
location_li = soup.find(class_='select__selected-list scroll').find_all('li')
location_dict = {}
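# Map each country name to the numeric id embedded in its list item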
for country in location_li:
location = re.search(r'(?<=span>).+(?=</span)', str(country)).group().strip()
index = int(re.search(r'(?<=\.val\(\')\d+(?=\')', str(country)).group())
location_dict[location] = index
new_string_char = '\n'
your_location_index = location_dict[
input(
f'Choose your location from list: '
f'{new_string_char + new_string_char.join(location_dict.keys()) + new_string_char}'
f'Enter state: ')]
for i in range(0, 40, 20):
location_url = f'https://tury.ru/hotel/?cn={your_location_index}&s={i}'
response = requests.get(url=location_url).text
soup = BeautifulSoup(response, 'lxml')
hotels = [hotel['href'] for hotel in soup.find_all('a', class_='reviews-travel__title')]
if not hotels:
break
for link in hotels:
link_re = re.search(r"(?<=\d-).+", link)
if not link_re:
continue
print(link)
req = requests.get(url=link, headers=fake_ua)
with open(f'data/{link_re.group()}.html', 'w', encoding='utf-8') as file:
file.write(req.text)
def main():
get_data()
if __name__ == '__main__':
main()
|
Baradys/scrappers
|
scrappers/tury/tury_hotels.py
|
tury_hotels.py
|
py
| 1,685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fake_useragent.UserAgent",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 39,
"usage_type": "call"
}
] |
8784215352
|
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
class Organisation(models.Model):
"""
An organisation that the user belongs to.
Eg: user_1 belongs to xyz organisation
# Create an organisation
>>> organisation_1 = Organisation.objects.create(organisation_name="xyz", organisation_logo="image1.png")
>>> organisation_2 = Organisation.objects.create(organisation_name="abc", additional_details="Mumbai based company")
"""
organisation_name = models.CharField(
max_length=200,
primary_key=True,
)
organisation_logo = models.ImageField(
upload_to='Organisation/Organisation Logo/%Y/%m/%d/',
null=True,
blank=True,
verbose_name="Organisation Logo"
)
additional_details = models.TextField(
blank=True
)
def __str__(self):
return self.organisation_name
class Profile(models.Model):
"""
A Profile associated with an existing user.
Eg: organisation name and phone number are some profile details associated with user_1
# Create a user profile
>>> prof_1 = Profile.objects.create(user=user_1, organisation_name="abc", profile_picture="image2.png")
>>> prof_2 = Profile.objects.create(user=user_2, organisation_name="abc", phone_number="9999999999")
"""
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True
)
organisation_name = models.ForeignKey(
Organisation,
on_delete=models.CASCADE,
null=True,
)
profile_picture = models.ImageField(
upload_to='Organisation/Employee Profile Picture/%Y/%m/%d/',
null=True,
blank=True,
verbose_name="Employee Profile picture"
)
phone_number = models.CharField(
blank=True,
max_length=10
)
def __str__(self):
return self.user.username # pylint: disable = E1101
class Header(models.Model):
"""
A Header list associated with each organisation.
Eg: Organisation xyz may contain headers in the csv file such as- user_id, title etc
# Create a header instance
>>> Header.objects.create(organisation_name="xyz", header="user_id")
"""
organisation_name = models.ForeignKey(
Organisation,
on_delete=models.CASCADE,
)
header = models.CharField(
max_length=200
)
class Meta:
verbose_name_plural = 'Headers'
class Category(models.Model):
"""
A Category list associated with each organisation.
Eg: Organisation xyz may contain categories in the csv file such as- hawkers, garbage etc
# Create a category instance
>>> Category.objects.create(organisation_name="xyz", category="hawkers")
"""
organisation_name = models.ForeignKey(
Organisation,
on_delete=models.CASCADE,
)
category = models.CharField(
max_length=200
)
class Meta:
verbose_name_plural = 'Category'
class File(models.Model):
"""
A File uploaded by the logged-in user.
Eg: user_1 may upload a .csv file on 12/12/12
# Create a file instance
>>> File.objects.create(uploaded_by=user_1, csv_file="file1.csv", uploaded_date = "Jan. 29, 2019, 7:59 p.m.")
"""
uploaded_by = models.ForeignKey(
User,
on_delete=models.CASCADE,
)
csv_file = models.FileField(
upload_to='Organisation/CSV File/%Y/%m/%d/',
)
uploaded_date = models.DateTimeField(
default=datetime.now,
)
class Meta:
verbose_name_plural = 'CSV File Meta'
|
simranmadhok/Venter_CMS
|
Venter/models.py
|
models.py
|
py
| 3,615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "django.db.models.FileField",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 124,
"usage_type": "name"
}
] |
31927207952
|
from fastapi import FastAPI, Request, Response
import http, csv, json, yaml
import xml.etree.ElementTree as ET
app = FastAPI()
@app.get("/read-txt")
def _readTxtEndpoint():
with open('./text_file.txt') as f:
lines = f.read()
return {"resultSet": lines}
@app.get("/read-csv")
def _readCsvEndpoint():
with open('./csv_file.csv') as csv_file:
list = []
reader = csv.reader(csv_file)
for row in reader:
list.append(row)
csv_file.close()
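# Pair the header row with the first data row to build a dict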
keys = list[0]
values = list[1]
myDict = {k: v for k, v in zip(keys, values)}
return {"resultSet": myDict}
@app.get("/read-yaml")
def _readYamlEndpoint():
with open('./yaml_file.yaml') as yaml_file:
output = yaml.safe_load(yaml_file)
return {"resultSet": output}
@app.get("/read-xml")
def _readXmlEndpoint():
root = ET.parse('./xml_file.xml').getroot()
output = root[0].text
return {"resultSet": output}
|
DavidKrtolica/system_integration_repo
|
data_format_translation_servers [INDIVIDUAL]/python/main.py
|
main.py
|
py
| 950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 35,
"usage_type": "name"
}
] |
23916514495
|
import torch
import os
import numpy as np
import csv
from torch.utils.data import Dataset
from torchvision import transforms
import torchvision
from PIL import Image
import json
default_transform = transforms.Compose([
transforms.ToTensor(),
])
class iclevr_dataset(Dataset):
def __init__(self, args, mode='train', transform=default_transform):
self.root = args.data_root
self.mode = mode
if mode == 'train':
with open(args.train_json, 'r') as file:
self.data = list(json.load(file).items())
elif mode == 'test':
with open(args.test_json, 'r') as file:
self.data = json.load(file)
elif mode == 'new_test':
with open(args.new_test_json, 'r') as file:
self.data = json.load(file)
with open('../dataset/objects.json', 'r') as file:
self.object_dict = json.load(file)
self.cls = len(self.object_dict)
def __len__(self):
return len(self.data)
def get_img(self, index):
fname = f'{self.root}/{self.data[index][0]}'
image = torchvision.io.read_image(fname).to(torch.float32)[:3]
size = min(image.shape[1:])
image = image / 255.
transform = transforms.Compose([
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
transforms.CenterCrop(size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.Resize(64, antialias=True)
])
return transform(image)
def get_cond(self, index):
cond = self.data[index][1] if self.mode == 'train' else self.data[index]
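# Build a multi-hot vector over all object classes for the condition labels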
one_hot_cond = torch.zeros(self.cls)
for label in cond:
one_hot_cond[self.object_dict[label]] = 1.0
return one_hot_cond
def __getitem__(self, index):
if self.mode == 'train':
img = self.get_img(index)
cond = self.get_cond(index)
return img, cond
else:
cond = self.get_cond(index)
return cond
|
ToooooooT/Deep-Learning
|
lab07/source_code/dataset.py
|
dataset.py
|
py
| 2,146 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.transforms.Compose",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.io.read_image",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torchvision.io",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 50,
"usage_type": "call"
}
] |
12509878797
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
from collections import deque
class Solution:
def reorderList(self, head: Optional[ListNode]) -> None:
if not head:
return
q=deque()
node=head
while node: #insert elements of linked list into a queue
node=node.next
if not node:
break
q.append(node)
while q:
if head: #pop last element from queue and insert in head.next
temp=q.pop()
head.next=temp
head=head.next
if head and q: #pop first element from queue and insert in head.next
temp=q.popleft()
head.next=temp
head=head.next
head.next=None
|
Tanya-Katiyar/Leetcode
|
0143-reorder-list/0143-reorder-list.py
|
0143-reorder-list.py
|
py
| 927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
71779835068
|
'''
1. CALCULATE HOW MUCH THE INVESTMENT IS WORTH AT EACH YEAR-END USING THE PRICES GIVEN IN THE CODE.
2. MAKE A PLOT TO SHOW HOW MUCH THE VALUE OF YOUR 1000 INVESTMENT CHANGES IN A YEAR.
FIRST CALCULATE HOW MANY BITCOINS YOU WILL HAVE AT THE START BY DIVIDING YOUR INVESTMENT BY THE BITCOIN PRICE IN THE FIRST YEAR (THE FIRST ELEMENT OF THE GIVEN ARRAY).
THEN MULTIPLY THE WHOLE PRICE ARRAY BY THAT NUMBER TO GET THE VALUE FOR EACH YEAR. USE np.multiply(array,number) TO MULTIPLY AN ARRAY BY
A NUMBER.
'''
#PREVIOUSLY:
import numpy as np
import numpy_financial as npf
#PRICE FROM 2018 TO 2021:
bitcoin = [3869.47,7188.46,22203.31,29391.78]
print(np.std(bitcoin))
bitcoin_IRR = [-500000, 3869.47*10, 7188.46*10, 22203.31*10, 29391.78*10]
print(npf.irr(bitcoin_IRR))
#INITIAL INVESTMENT:
investment_ini = 1000
bitcoins_ini = investment_ini/bitcoin[0]
print(bitcoins_ini)
valor_anual = np.multiply(bitcoin,bitcoins_ini)
print(valor_anual)
#PLOT:
import matplotlib.pyplot as plt
años = [2018,2019,2020,2021]
plt.plot(años,valor_anual)
plt.savefig('Crecimiento de Inversion Anual')
|
Giovanny100/archivos_Trabajo
|
archivos_py/calculos_financieros/CALCULOS BASICOS/grafico_bitcoin.py
|
grafico_bitcoin.py
|
py
| 1,130 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.std",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy_financial.irr",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
}
] |
17689670172
|
import os
from torch.utils.data import Dataset
from torchvision.transforms import RandomCrop, Resize, InterpolationMode, RandomHorizontalFlip
from torchvision.transforms.functional import rotate
from torchvision.io import read_image
import numpy as np
class ImageData(Dataset):
def __init__(self, data_path, HR_shape=None, training=True):
super(ImageData, self).__init__()
self.data_path = data_path
self.data = os.listdir(data_path)
self.training = training
if training:
LR_shape = HR_shape // 4
self.crop = RandomCrop((HR_shape, HR_shape), pad_if_needed=True)
self.resize = Resize((LR_shape, LR_shape), InterpolationMode.BICUBIC)
self.rand_flip = RandomHorizontalFlip()
else:
self.crop = RandomCrop((400, 400), pad_if_needed=True)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
file_name = self.data[index]
image = read_image(os.path.join(self.data_path, file_name))
image = self.crop(image)
if self.training:
image = self.rand_flip(image)
if np.random.rand() < 0.5:
image = rotate(image, 90)
LR_image = self.resize(image) / 255.0
else:
LR_image = Resize((image.shape[1] // 4, image.shape[2] // 4), InterpolationMode.BICUBIC)(image) / 255.0
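# Scale HR images to [-1, 1]; LR images stay in [0, 1]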
HR_image = 2 * (image / 255.0) - 1
return LR_image, HR_image
|
abed11326/Training-a-Super-Resolution-GAN-for-4x-image-upscaling
|
imageData.py
|
imageData.py
|
py
| 1,492 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.InterpolationMode.BICUBIC",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.InterpolationMode",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.io.read_image",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.functional.rotate",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.InterpolationMode.BICUBIC",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.InterpolationMode",
"line_number": 36,
"usage_type": "name"
}
] |
33383350794
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import *
import bcrypt
# Create your views here.
def index(request):
return render(request, 'index.html')
def register(request):
if request.method != 'POST':
return redirect ('/')
errors = User.objects.registration_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
else:
password = request.POST['password']
pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
new_user = User.objects.create(
first_name = request.POST['first_name'],
last_name = request.POST['last_name'],
email = request.POST['email'],
password = pw_hash
)
request.session['userid'] = new_user.id
request.session['first_name'] = new_user.first_name
request.session['last_name'] = new_user.last_name
#messages.info(request, "User registered; log in now")
return redirect('/quotes')
def login(request):
if request.method != 'POST':
return redirect('/')
errors = User.objects.login_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
else:
user = User.objects.filter(email=request.POST['email'])
if user:
logged_user = user[0]
if bcrypt.checkpw(request.POST['password'].encode(), logged_user.password.encode()):
request.session['userid'] = logged_user.id
request.session['first_name'] = logged_user.first_name
request.session['last_name'] = logged_user.last_name
return redirect('/quotes')
messages.error(request, "Email and password are incorrect")
return redirect('/')
def logout(request):
request.session.flush()
return redirect('/')
def quotes(request):
if 'userid' not in request.session:
return redirect('/')
else:
logged_user = User.objects.get(id=request.session['userid'])
all_quotes = Quote.objects.all().order_by('created_at')
context ={
#'my_wishes': Wish.objects.filter(wished_by= logged_user),
'all_quotes': all_quotes,
'user': logged_user
}
return render(request, 'quotes.html', context)
def create(request):
if 'userid' not in request.session:
return redirect('/')
errors = Quote.objects.basic_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/quotes')
logged_user = User.objects.get(id=request.session['userid'])
Quote.objects.create(
author = request.POST['author'],
content = request.POST['content'],
posted_by = logged_user
)
return redirect('/quotes')
def editmyaccount(request, userid):
if 'userid' not in request.session:
return redirect('/')
else:
logged_user = User.objects.get(id=request.session['userid'])
context = {
'user': logged_user
}
return render(request, 'edit.html', context)
def update(request, userid):
if 'userid' not in request.session:
return redirect('/')
errors = User.objects.edit_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect("/editmyaccount/{}".format(userid))
# update wish!
to_update = User.objects.get(id=userid)
# updates each field
to_update.first_name = request.POST['first_name']
to_update.last_name = request.POST['last_name']
to_update.email = request.POST['email']
to_update.save()
return redirect('/quotes')
def profile(request,userid):
if 'userid' not in request.session:
return redirect('/')
else:
user = User.objects.get(id=userid)
user_quotes = Quote.objects.filter(posted_by=userid).order_by('created_at')
context ={
#'my_wishes': Wish.objects.filter(wished_by= logged_user),
'user_quotes': user_quotes,
'user': user
}
return render(request, 'profile.html', context)
def like(request,quoteid):
if 'userid' not in request.session:
return redirect('/')
if request.method == "POST":
logged_user = User.objects.get(id=request.session['userid'])
quote = Quote.objects.get(id=quoteid)
liked_users = quote.user_that_like_quote
liked_users.add(logged_user)
return redirect('/quotes')
def delete(request,quoteid):
to_delete = Quote.objects.get(id=quoteid)
to_delete.delete()
return redirect('/quotes')
|
destinyng/quote_project_BlackBelt
|
quote_app/views.py
|
views.py
|
py
| 4,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bcrypt.hashpw",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bcrypt.gensalt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bcrypt.checkpw",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 150,
"usage_type": "call"
}
] |
42214943545
|
"""
/*** This module implements custom music lookup and returns a music CQ code through the kuq interface for reverse transmission ****/
/*** The music can come from any platform, and search is supported ****/
/*** written by @fengx1a0
"""
class FindMusic():
def __init__(self,key):
self.__key = key
import requests
self.__request = requests.get
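# Search the music API for the keyword and keep the first matching song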
handle = self.__request(url="http://musicapi.leanapp.cn/search?keywords="+self.__key)
_json = handle.json()
self.__id = str(_json['result']['songs'][0]['id'])
self.__songname = _json['result']['songs'][0]['name']
self.__albumid = str(_json['result']['songs'][0]['album']['id'])
tmp = _json['result']['songs'][0]['artists']
self.__auth = ''
for i in tmp:
self.__auth+=i["name"]
self.__auth+="/"
self.__auth = self.__auth[:-1]
handle.close()
def get_url(self):
return "https://music.163.com/#/song?id="+self.__id
def get_image(self):
handle = self.__request(url="http://musicapi.leanapp.cn/album?id="+self.__albumid)
_json = handle.json()
imageurl = _json['songs'][0]['al']['picUrl']
handle.close()
return imageurl
def getaudio(self):
return "https://music.163.com/song/media/outer/url?id="+self.__id+".mp3"
def gettitle(self):
return self.__songname
def getcontent(self):
return self.__auth
def get_music(msg): # music name + fuzzy search
music=msg[6:-1]
musicInfo = FindMusic(msg)
try:
musicInfo = FindMusic(msg)
except:
return "呜呜呜~该音乐未找到..."
msg = "[CQ:music,type=custom,url={},audio={},title={},content={},image={}]".format(musicInfo.get_url() ,musicInfo.getaudio(),musicInfo.gettitle(),musicInfo.getcontent(),musicInfo.get_image())
print(msg)
return msg
|
fengx1a0/just_robot
|
HexRun/music_enhanced_module.py
|
music_enhanced_module.py
|
py
| 1,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "attribute"
}
] |
11409286861
|
import os
from deep_training.data_helper import ModelArguments, TrainingArguments, DataArguments
from transformers import HfArgumentParser
from data_utils import train_info_args, NN_DataHelper
from models import MyTransformer, ChatGLMTokenizer, setup_model_profile, ChatGLMConfig, LoraArguments, global_args, \
InvalidScoreLogitsProcessor, LogitsProcessorList
if __name__ == '__main__':
import json
from utils.prompter import Prompter
from tqdm import trange, tqdm
from alpaca2qa_loan_aug import process_profile
file_for_inference = 'dialog4_aug.json'
data_path = '/home/fm001/wangyuxuan/data/loan'
prompter = Prompter('loan_template')
train_info_args['seed'] = None
parser = HfArgumentParser((ModelArguments, DataArguments))
model_args, data_args = parser.parse_dict(train_info_args, allow_extra_keys=True)
setup_model_profile()
dataHelper = NN_DataHelper(model_args, None, data_args)
tokenizer: ChatGLMTokenizer
tokenizer, _, _, _ = dataHelper.load_tokenizer_and_config(
tokenizer_class_name=ChatGLMTokenizer, config_class_name=ChatGLMConfig)
ckpt_name = 'epoch_9'
ckpt_path = 'output_loan_alpaca-dialog4_aug_v3'
ckpt_dir = f'./{ckpt_path}/best_ckpt_{ckpt_name}'
config = ChatGLMConfig.from_pretrained(ckpt_dir)
config.initializer_weight = False
lora_args = LoraArguments.from_pretrained(ckpt_dir)
assert lora_args.inference_mode is True and config.pre_seq_len is None
pl_model = MyTransformer(config=config, model_args=model_args, lora_args=lora_args,
# load_in_8bit=global_args["load_in_8bit"],
# # device_map="auto",
# device_map = {"":0} # first GPU
)
# Load the LoRA weights
pl_model.load_sft_weight(ckpt_dir)
if getattr(pl_model.get_llm_model(), "is_loaded_in_8bit", False):
pl_model.eval().cuda()
else:
pl_model.eval().half().cuda()
enable_merge_weight = False
if enable_merge_weight:
# Merge the LoRA weights and save
pl_model.save_sft_weight(os.path.join(ckpt_dir, 'pytorch_model_merge.bin'), merge_lora_weight=True)
else:
model = pl_model.get_llm_model()
# prepare data
with open(os.path.join(data_path, file_for_inference), mode='r', encoding='utf-8') as f:
list_data_dict = json.loads(f.read())['items']
for example in list_data_dict:
example['input'] = process_profile(example['input'])[2]
prompt_list = [prompter.generate_prompt(example['instruction'], example['input']) for example in list_data_dict]
# set params
max_length = 2048
num_beams = 1
do_sample = True
top_p = 0.7
temperature = 0.95
logits_processor = LogitsProcessorList()
logits_processor.append(InvalidScoreLogitsProcessor())
gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
"temperature": temperature, "logits_processor": logits_processor}
# inference by batch
response_list = []
global_batch_size = 50
for i in trange(0, len(prompt_list), global_batch_size):
tmp_prompt_list = prompt_list[i:i + global_batch_size]
inputs = tokenizer(tmp_prompt_list, return_tensors="pt", padding=True)
inputs = inputs.to(model.device)
outputs = model.generate(**inputs, **gen_kwargs)
response_list.extend(
[model.process_response(tokenizer.decode(output[len(inputs["input_ids"][0]):])) for output in
outputs.tolist()])
assert len(prompt_list) == len(response_list)
# update response
for idx, example in tqdm(enumerate(list_data_dict)):
example.update({
"output_sft": response_list[idx]
})
# save file
file_save_path = os.path.join(data_path, ckpt_path)
if not os.path.exists(os.path.join(data_path, ckpt_path)):
os.makedirs(file_save_path)
with open(os.path.join(file_save_path, f"sft-{ckpt_name}-" + file_for_inference), mode='w', encoding='utf-8',
newline='\n') as f:
for line in list_data_dict:
f.write(json.dumps(line, ensure_ascii=False) + '\n')
|
kavin525zhang/AIGC
|
pretrained_model/ChatGLM/loan_collection/infer_lora_batch.py
|
infer_lora_batch.py
|
py
| 4,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.prompter.Prompter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "data_utils.train_info_args",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "transformers.HfArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "deep_training.data_helper.ModelArguments",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "deep_training.data_helper.DataArguments",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "data_utils.train_info_args",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "models.setup_model_profile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "data_utils.NN_DataHelper",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.ChatGLMTokenizer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "models.ChatGLMTokenizer",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.ChatGLMConfig",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.ChatGLMConfig.from_pretrained",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.ChatGLMConfig",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "models.LoraArguments.from_pretrained",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.LoraArguments",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "models.MyTransformer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "alpaca2qa_loan_aug.process_profile",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.LogitsProcessorList",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "models.InvalidScoreLogitsProcessor",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tqdm.trange",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 102,
"usage_type": "call"
}
] |
40359316924
|
from django.db.models import fields
from django.forms.forms import Form
from django.shortcuts import render, redirect, HttpResponseRedirect
from django.urls import reverse
from django.views.generic import ListView, DetailView, UpdateView, CreateView, FormView
from django.http import Http404
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from . import models as noti_model
from reviews import forms
from notifications import forms as noti_forms
from users import mixins
from users import models as user_model
# Create your views here.
def BoardView(request):
page = request.GET.get("page", 1)
notifications = noti_model.Posting.objects.filter(notification=True)
normals = noti_model.Posting.objects.filter(notification=False)
paginator = Paginator(normals, 14, orphans=5)
normals = paginator.page(int(page))
return render(
request,
"notifications/posting_list.html",
{"normals": normals, "notifications": notifications},
)
@login_required
def post_detail(request, pk):
try:
post = noti_model.Posting.objects.get(pk=pk)
print(post.notification)
form = forms.CreateCommentForm()
user_comment = None
if request.method == "POST":
print(request.POST)
comment_form = forms.CreateCommentForm(request.POST)
if comment_form.is_valid():
user_comment = comment_form.save(commit=False)
user_comment.post = post
user_comment.user = request.user
user_comment.save()
return HttpResponseRedirect(
reverse("notifications:detail", kwargs={"pk": pk}),
{"post": post, "form": form},
) ## to prevent us from double submit
return render(
request, "notifications/detail.html", {"post": post, "form": form}
)
except noti_model.Posting.DoesNotExist:
return Http404
def search(request):
filter_args = {}
keyword = request.GET.get("keyword")
print(keyword)
if keyword != None:
filter_args["title__contains"] = keyword
results = noti_model.Posting.objects.filter(**filter_args)
else:
results = noti_model.Posting.objects.all()
page = request.GET.get("page", 1)
paginator = Paginator(results, 12, orphans=5)
posts = paginator.page(int(page))
return render(
request,
"notifications/search.html",
{"page": posts, "keyword": keyword},
)
class PostPhotosView(DetailView):
model = noti_model.Posting
template_name = "notifications/post_photos.html"
def get_object(self, queryset=None):
post = super().get_object(queryset=queryset)
if post.user.pk != self.request.user.pk:
raise Http404()
return post
class EditPhotoView(UpdateView):
model = noti_model.Photo
template_name = "notifications/photo_edit.html"
fields = ("caption",)
pk_url_kwarg = "photo_pk"
def get_success_url(self):
post_pk = self.kwargs.get("post_pk")
return reverse("notifications:photos", kwargs={"pk": post_pk})
class AddPhotoView(FormView):
template_name = "notifications/photo_create.html"
form_class = noti_forms.CreatePhotoForm
def form_valid(self, form):
pk = self.kwargs.get("pk")
form.save(pk)
return redirect(reverse("notifications:photos", kwargs={"pk": pk}))
class UploadPostView(mixins.LoggedInOnlyView, FormView):
template_name = "notifications/post_create.html"
form_class = noti_forms.CreatePostForm
def form_valid(self, form):
noti = self.request.POST.get("notificataion")
if noti == "on":
bool = True
else:
bool = False
pk = self.request.user.pk
form.save(pk, bool)
return redirect("/notifications/")
@login_required
def delete_photo(request, post_pk, photo_pk):
user = request.user
try:
post = noti_model.Posting.objects.get(pk=post_pk)
if post.user != user:
messages.error(request, "You are not athorized")
else:
noti_model.Photo.objects.filter(pk=photo_pk).delete()
messages.success(request, "Photo Deleted")
return redirect(reverse("notifications:photos", kwargs={"pk": post_pk}))
except noti_model.Posting.DoesNotExist:
return redirect(reverse("core:home"))
|
glauke1996/Kindergarten_Project
|
notifications/views.py
|
views.py
|
py
| 4,518 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.core.paginator.Paginator",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "reviews.forms.CreateCommentForm",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "reviews.forms",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "reviews.forms.CreateCommentForm",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "reviews.forms",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.HttpResponseRedirect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.views.generic.FormView",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "notifications.forms.CreatePhotoForm",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "notifications.forms",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "users.mixins.LoggedInOnlyView",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "users.mixins",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "django.views.generic.FormView",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "notifications.forms.CreatePostForm",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "notifications.forms",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 126,
"usage_type": "name"
}
] |
22219030346
|
from director.consoleapp import ConsoleApp
from director import robotsystem
from director import visualization as vis
from director import objectmodel as om
from director import ikplanner
from director import ikconstraintencoder as ce
from director import ikconstraints
from director import transformUtils
import numpy as np
import pprint
import json
def getRobotState():
return robotStateJointController.q.copy()
def buildConstraints():
'''
For testing, build some constraints and return them in a list.
'''
startPose = getRobotState()
startPoseName = 'plan_start'
endPoseName = 'plan_end'
ikPlanner.addPose(startPose, startPoseName)
ikPlanner.addPose(startPose, endPoseName)
constraints = []
constraints.extend(ikPlanner.createFixedFootConstraints(startPoseName))
constraints.append(ikPlanner.createMovingBaseSafeLimitsConstraint())
constraints.append(ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
constraints.append(ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
constraints.append(ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
targetFrame = ikPlanner.getLinkFrameAtPose(ikPlanner.getHandLink('left'), startPose)
p, o = ikPlanner.createPositionOrientationGraspConstraints('left', targetFrame)
p.tspan = [1.0, 1.0]
o.tspan = [1.0, 1.0]
constraints.extend([p, o])
return constraints
def reconstructConstraints(constraints):
'''
Convert dicts (decoded from json) back to the original
constraint classes using the 'class' information in the dict
'''
objs = []
for c in constraints:
objClass = getattr(ikconstraints, c['class'])
del c['class']
obj = objClass()
objs.append(obj)
for attr, value in c.items():
if isinstance(value, dict) and 'position' in value and 'quaternion' in value:
value = transformUtils.transformFromPose(value['position'], value['quaternion'])
setattr(obj, attr, value)
return objs
def testPlanConstraints():
ikPlanner.planningMode = 'dummy'
constraints = buildConstraints()
poses = ce.getPlanPoses(constraints, ikPlanner)
poseJsonStr = json.dumps(poses, indent=4)
constraintsJsonStr = ce.encodeConstraints(constraints, indent=4)
print(poseJsonStr)
print(constraintsJsonStr)
print('--------------decoding--------------------')
constraints = ce.decodeConstraints(constraintsJsonStr)
pprint.pprint(constraints)
print('--------------reconstructing--------------')
constraints = reconstructConstraints(constraints)
print('--------------matlab commands---------------')
for c in constraints:
c.printCommands()
app = ConsoleApp()
view = app.createView()
robotSystem = robotsystem.create(view=view, planningOnly=True)
app.setupGlobals(globals())
globals().update(dict(robotSystem))
testPlanConstraints()
|
RobotLocomotion/director
|
src/python/tests/testPlanConstraints.py
|
testPlanConstraints.py
|
py
| 2,959 |
python
|
en
|
code
| 176 |
github-code
|
6
|
[
{
"api_name": "director.ikconstraints",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "director.transformUtils.transformFromPose",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "director.transformUtils",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "director.ikconstraintencoder.getPlanPoses",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "director.ikconstraintencoder",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "director.ikconstraintencoder.encodeConstraints",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "director.ikconstraintencoder",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "director.ikconstraintencoder.decodeConstraints",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "director.ikconstraintencoder",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "pprint.pprint",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "director.consoleapp.ConsoleApp",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "director.robotsystem.create",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "director.robotsystem",
"line_number": 99,
"usage_type": "name"
}
] |
18918432514
|
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import re
import json
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--start-maximized')
chrome_options.add_argument('--single-process')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("--incognito")
chrome_options.add_argument('--disable-blink-features=AutomationControlled')
chrome_options.add_experimental_option('useAutomationExtension', False)
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_argument("disable-infobars")
ser = Service("./chromedriver.exe")
driver = webdriver.Chrome(options=chrome_options ,service=ser)
mainUrl = "https://batdongsan.com.vn/nha-dat-ban"
houseList = []
c = 1
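# Crawl listing pages p4 through p123; c counts the detail pages scraped so far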
for i in range(3, 123):
url = "https://batdongsan.com.vn/nha-dat-ban/p" + str(i+1)
driver.get(url)
houseTags = driver.find_elements(by=By.CSS_SELECTOR, value=".js__product-link-for-product-id")
houseUrls = [el.get_attribute("href") for el in houseTags]
# for subUrl in houseUrls:
for subUrl in houseUrls:
print(c)
print(subUrl)
driver.get(subUrl)
delay = 5 # seconds
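        # wait until the photo thumbnail gallery is present before reading the rendered page source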
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located(
(By.CSS_SELECTOR, '.re__media-thumb-item.js__media-thumbs-item.slick-slide.slick-active')))
except Exception as e:
print(e)
continue
html = driver.page_source
soup = BeautifulSoup(html, "html.parser")
house = {}
try:
house['Original Link'] = subUrl
house['Tên người bán'] = soup.select_one('body > div.re__main > div > div.re__main-sidebar > div.re__sidebar-box.re__contact-box.js__contact-box > div.re__contact-name.js_contact-name')['title']
house['Số điện thoại'] = soup.select_one('body > div.re__main > div > div.re__main-sidebar > div.re__sidebar-box.re__contact-box.js__contact-box > div.re__btn.re__btn-cyan-solid--md.phone > span')['mobile']
house['Tiêu đề'] = soup.select_one('#product-detail-web > h1').getText()
house['Địa chỉ'] = soup.find(class_ = "re__pr-short-description").getText()
moTa = soup.find(class_="re__section-body re__detail-content js__section-body js__pr-description js__tracking").getText()
            s = re.sub(r'<br\s*?>', ' ', moTa)
house['Mô tả'] = s
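            # the spec table comes as parallel title/value element lists; pair them into dict entries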
elements = soup.find_all(class_ = "re__pr-specs-content-item-title")
elements2 = soup.find_all(class_ = "re__pr-specs-content-item-value")
for i in range(len(elements)):
a = elements2[i].getText()
b = str(a)
house[str(elements[i].getText())] = b
house['Ngày đăng'] = soup.select_one('#product-detail-web > div.re__pr-short-info.re__pr-config.js__pr-config > div:nth-child(1) > span.value').getText()
house['Ngày hết hạn'] = soup.select_one('#product-detail-web > div.re__pr-short-info.re__pr-config.js__pr-config > div:nth-child(1) > span.value').getText()
house['Mã tin'] = soup.select_one('#product-detail-web > div.re__pr-short-info.re__pr-config.js__pr-config > div:nth-child(4) > span.value').getText()
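            # collect the gallery image URLs from the active slider thumbnails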
elements = soup.find_all("div", {"class":"re__media-thumb-item js__media-thumbs-item slick-slide slick-active"})
ImgArr = []
for el in elements:
ImgArr.append(el.findChild("img", recursive=False)['data-src'])
            rs_s = ', '.join(ImgArr)
            house['Ảnh'] = rs_s
houseList.append(house)
c += 1
print(house)
except Exception as e:
print(e)
continue
print('so luong data: ' + str(len(houseList)))
s = set()
for i in houseList:
s.update(i)
header = list(s)
with open("Bat-dong-san2.csv", 'w', encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(header)
for d in houseList:
writer.writerow([d.get(i, "NULL") for i in header])
# with open('outputfile.json', 'w') as fout:
# json.dump(houseList, fout)
|
DangDuyAnh/Tich-Hop-Du-Lieu
|
DIProject/crawler/batdongsan-bot.py
|
batdongsan-bot.py
|
py
| 4,570 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 88,
"usage_type": "call"
}
] |
2517903386
|
import cv2
import numpy as np
import math
import os
import pygame #play music
from tkinter.filedialog import askdirectory
from tkinter import *
root=Tk()
root.configure(background='grey')
root.minsize(300,300)
listofsongs = []
total=3
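# NOTE: playlist length is hard-coded; this assumes the chosen directory holds exactly 3 mp3 files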
index = total-1#of list
def nextsong(event):
global index
if(index==0):
index=total-1
else:
index-=1
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
def stopsong(event):
pygame.mixer.music.stop()
def directorychooser():
directory = askdirectory()
os.chdir(directory)#change directory
for files in os.listdir(directory):
if files.endswith(".mp3"):
listofsongs.append(files)
print(listofsongs)
pygame.mixer.init()#initialise mixer module
def nextsong(event=None):
global index
if(index==0):
index=total-1
else:
index-=1
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
def stopsong(event=None):
pygame.mixer.music.stop()
def prevsong(event=None):
global index
index+=1
index=index%total
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
def playsong(event=None):
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
directorychooser()
#listofsongs.reverse()
label=Label(root, text='Music Player',font=('times', 10, 'bold'), bg='grey')
label.pack()
listbox = Listbox(root,font=('times', 10, 'bold'),width=25, bg='white')
listbox.pack()
for items in listofsongs:
listbox.insert(0,items)
nextbutton=Button(root,activebackground='white',activeforeground='blue',font=('times', 9),text='Next Song',width=10)
nextbutton.pack()
previousbutton=Button(root,activebackground='white',activeforeground='blue',font=('times', 9),text='Previous Song',width=10)
previousbutton.pack()
stopbutton=Button(root,activebackground='white',activeforeground='blue',font=('times', 9),text='Stop',width=10)
stopbutton.pack()
playbutton=Button(root,activebackground='white',activeforeground='blue',font=('times', 9),text='Play',width=10)
playbutton.pack()
nextbutton.bind("<Button-1>",nextsong)#<Button-1> left button
#<Button-2> wheel
#3 is right
previousbutton.bind("<Button-1>",prevsong)
stopbutton.bind("<Button-1>",stopsong)
playbutton.bind("<Button-1>",playsong)
cap=cv2.VideoCapture(0)
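# Webcam loop: threshold the hand ROI, count convexity defects and map the finger count to player actions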
while(cap.isOpened()):
ret,img=cap.read()
cv2.rectangle(img,(0,0),(350,350),(0,255,),0)
crop=img[0:350,0:350]#roi
grey=cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)
value=(35,35)
blur=cv2.GaussianBlur(grey,value,0)
ret1,thresh=cv2.threshold(blur,127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)#cv2.CHAIN_APPROX_SIMPLE
drawing = np.zeros(crop.shape,np.uint8)
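    # assume the largest contour is the hand; build its convex hull to measure convexity defects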
cnt = max(contours, key = lambda x: cv2.contourArea(x))
hull=cv2.convexHull(cnt)
areahull = cv2.contourArea(hull)
areacnt = cv2.contourArea(cnt)
arearatio=((areahull-areacnt)/areacnt)*100
cv2.drawContours(drawing,[cnt],0,(0,255,0),0)
cv2.drawContours(drawing,[hull],0,(0,0,255),0)
hull=cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
count_defects = 0
cv2.drawContours(thresh, contours, -1, (0, 255, 0), 3)#3 is width
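    # each defect with an angle <= 90 degrees is treated as the valley between two extended fingers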
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]#[start point, end point, farthest point, approximate distance to farthest point ].
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
# ignore angles > 90 and highlight rest with red dots
if angle <= 90:
count_defects += 1
cv2.circle(crop, far, 1, [0,0,255], -1)
# define actions required
if count_defects == 1:
cv2.putText(img,"2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
elif count_defects == 2:
str = "2"
cv2.putText(img, "3", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255), 2)
elif count_defects == 3:
cv2.putText(img,"4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
playsong()
elif count_defects == 4:
cv2.putText(img,"5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
stopsong()
else:
if areacnt<2000:
cv2.putText(img,'Nothing',(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,(0,0,255),1,cv2.LINE_AA)
else:
if arearatio<12:
cv2.putText(img,'0',(50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 3, cv2.LINE_AA)
else:
cv2.putText(img,'1',(50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 3, cv2.LINE_AA)
cv2.imshow('thresh',thresh)
cv2.imshow('frame',img)
k = cv2.waitKey(10)
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
SDUDEJA16/MUSIC-ly-Gesture-Controlled-Music-Player
|
hand_detectionandrecoginition.py
|
hand_detectionandrecoginition.py
|
py
| 5,433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.mixer.music.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.stop",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.stop",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_INV",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "cv2.convexHull",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.convexHull",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cv2.convexityDefects",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "math.acos",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 185,
"usage_type": "call"
}
] |
72478260029
|
from django.http import JsonResponse
from django.shortcuts import render
from redis_ import rd
# Create your views here.
from django.views.decorators.cache import cache_page
from art.models import Art
from user import helper
import redis_
from art import tasks
@cache_page(30)
def show(request,id):
    login_user = helper.getLoginInfo(request) # read the login info stored in the session
    # fetch the article with the requested id
    art = Art.objects.get(pk=id)
    # record this read in the Redis read-count ranking (ReadTopRank)
    redis_.incrTopRank(id)
    readTopRank = redis_.getReadTopRank(5) # list of (id, score) pairs
return render(request,'art/show.html',locals())
def qdArt(request,id):
    # get the currently logged-in user's info
    login_user = helper.getLoginInfo(request)
    if not login_user:
        return JsonResponse({'msg':'请先登录','code':101})
    tasks.qdTask.delay(login_user.get('id'), id) # queue the grab task for asynchronous execution
return JsonResponse({'msg':'正在抢读','code':201})
def queryQDState(request,id):
login_user = helper.getLoginInfo(request)
if not login_user:
return JsonResponse({'msg':'请先登录','code':101})
uid = login_user.get('id')
# if rd.hexists('qdArt',uid):
    # # if one user grabs two books, query the grab state for the latest id rather than an earlier grab
    # qdId = rd.hget('qdArt', uid)  # id of the book already grabbed; qdId may differ from id
if login_user.get('id'):
art = Art.objects.get(pk=id)
return JsonResponse({'msg':'抢读成功','code':200,
'art':{'title':art.title,
'author':art.author}
})
if rd.hlen('qdArt') < 5:
return JsonResponse({'msg': '抢读中', 'code': 201})
else:
return JsonResponse({'msg': '抢读失败', 'code': 300})
|
cjxxu/A_Fiction_web
|
myapps/art/views.py
|
views.py
|
py
| 1,808 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "user.helper.getLoginInfo",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "user.helper",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "art.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "art.models.Art.objects.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "art.models.Art.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "art.models.Art",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "redis_.incrTopRank",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "redis_.getReadTopRank",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.cache.cache_page",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "user.helper.getLoginInfo",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "user.helper",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "art.tasks.qdTask.delay",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "art.tasks.qdTask",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "art.tasks",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "user.helper.getLoginInfo",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "user.helper",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "art.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "art.models.Art.objects.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "art.models.Art.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "art.models.Art",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "art.models.title",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "art.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "art.models.author",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "art.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "redis_.rd.hlen",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "redis_.rd",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 53,
"usage_type": "call"
}
] |
3654514470
|
import sys
import json
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.configuration import ConfigurationManager
from websocket import create_connection
def main():
"""
Main function, will run if executed from command line.
Sends parameters from commandline.
Param 1: message string
Param 2: data (json string)
"""
# Parse the command line
if len(sys.argv) == 2:
messageToSend = sys.argv[1]
dataToSend = {}
elif len(sys.argv) == 3:
messageToSend = sys.argv[1]
try:
dataToSend = json.loads(sys.argv[2])
except BaseException:
print("Second argument must be a JSON string")
print("Ex: python -m mycroft.messagebus.send speak "
"'{\"utterance\" : \"hello\"}'")
exit()
else:
print("Command line interface to the mycroft-core messagebus.")
print("Usage: python -m mycroft.messagebus.send message")
print(" python -m mycroft.messagebus.send message JSON-string\n")
print("Examples: python -m mycroft.messagebus.send system.wifi.setup")
print("Ex: python -m mycroft.messagebus.send speak "
"'{\"utterance\" : \"hello\"}'")
exit()
send(messageToSend, dataToSend)
def send(messageToSend, dataToSend=None):
"""
Send a single message over the websocket.
Args:
messageToSend (str): Message to send
dataToSend (dict): data structure to go along with the
message, defaults to empty dict.
"""
dataToSend = dataToSend or {}
# Calculate the standard Mycroft messagebus websocket address
config = ConfigurationManager.get().get("websocket")
url = WebsocketClient.build_url(config.get("host"),
config.get("port"),
config.get("route"),
config.get("ssl"))
# Send the provided message/data
ws = create_connection(url)
packet = Message(messageToSend, dataToSend).serialize()
ws.send(packet)
ws.close()
if __name__ == '__main__':
try:
main()
except IOError:
print('Could not connect to websocket, no message sent')
|
injones/mycroft_ros
|
scripts/mycroft/messagebus/send.py
|
send.py
|
py
| 2,371 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mycroft.configuration.ConfigurationManager.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "mycroft.configuration.ConfigurationManager",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "mycroft.messagebus.client.ws.WebsocketClient.build_url",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "mycroft.messagebus.client.ws.WebsocketClient",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "websocket.create_connection",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "mycroft.messagebus.message.Message",
"line_number": 63,
"usage_type": "call"
}
] |
30794578181
|
# Plattsalat specific python macros
import collections
import datetime
import numbers
import types
from typing import Any
import logging
import uno
from com.sun.star.lang import Locale
from com.sun.star.table.CellVertJustify import CENTER as vertCenter
from com.sun.star.table.CellHoriJustify import CENTER as horCenter
from com.sun.star.table.CellHoriJustify import RIGHT as horRight
from com.sun.star.table.CellHoriJustify import LEFT as horLeft
from com.sun.star.table import CellRangeAddress
def do_log(fname='/home/nils/tmp/oodebug.log'):
global log
logging.basicConfig(filename=fname)
log = logging.getLogger('libreoffice')
log.setLevel(logging.DEBUG)
class BioOfficeConn:
"""Connection to our Bio-Office database"""
def __init__(self):
# Obtain connection to our database.
# Needs the registered data source "bodb"
ctx = XSCRIPTCONTEXT.getComponentContext()
self.dbconn = ctx.ServiceManager.createInstanceWithContext(
"com.sun.star.sdb.DatabaseContext", ctx
).getByName("bodb").getConnection('', '')
def queryResult(self, sql, types):
"""Get the results of an SQL query as a list
sql is the query as a string, types is a string specifying
the types in each row. I is for Int, S for String, D for Double.
"""
meths = []
result = []
dbres = self.dbconn.createStatement().executeQuery(sql)
# create a list of methods from the type string
for c in types:
if c == 'I':
meths.append(getattr(dbres, 'getLong'))
elif c == 'S':
meths.append(getattr(dbres, 'getString'))
elif c == 'D':
meths.append(getattr(dbres, 'getDouble'))
while dbres.next():
result.append([meths[i](i+1) for i in range(len(meths))])
return result
def mkincond(name, value):
lst = ','.join(f"'{v}'" for v in value)
return f'{name} IN ({lst})'
def mkeqcond(name, value):
return f"{name} = '{value}'"
class Query(types.SimpleNamespace):
SQL = 'SELECT DISTINCT {cols} FROM V_Artikelinfo '\
"WHERE LadenID = 'PLATTSALAT' AND {cons} " \
'ORDER BY Bezeichnung'
EAN = 'CAST(CAST(EAN AS DECIMAL(20)) AS VARCHAR(20))'
Cols = ["EAN", "Bezeichnung", "Land", "VK1", "VK0", "VKEinheit"]
SCols = "SSSDDS"
CONDS = []
def __init__(self, wg=None, iwg=None, liefer=None) -> None:
self.wg, self.iwg, self.liefer = wg, iwg, liefer
def run(self):
self.cols = ','.join(self.EAN if c == 'EAN' else f'{c}' for c in self.Cols)
conditions = self.CONDS.copy()
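        # Build the WHERE clause from whichever filters (wg, iwg, liefer) were supplied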
for n, name in dict(iwg='iWG', liefer='LiefID', wg='WG').items():
value = self.__dict__[n]
if value is None: continue
if isinstance(value, list):
conditions.append(mkincond(name, value))
else:
conditions.append(mkeqcond(name, value))
self.cons = ' AND '.join(conditions)
self.sql = self.SQL.format_map(self.__dict__)
# log.debug(f'Query: {self.sql}')
return BioOfficeConn().queryResult(self.sql, self.SCols)
Pos = collections.namedtuple('Pos', 'x y')
class ColumnDef(types.SimpleNamespace):
"""Options for a single column in a table
This is mostly a container for various options. The following
options are currently recognized:
- width (int) width in mm
- height (int) char height
- tryOptWidth (boolean) First try to set the width to its optimum
. value. Only if that is too big, set it to the given width
- bold (boolean) set typeface to bold
- greyUnit (boolean) set background to grey if the text appears
. to represent discrete units
- hcenter (boolean) Center horizontally
- hright (boolean) Align on the right
"""
colDefaults = dict(
bold=False,
greyUnit=False,
tryOptWidth=False,
width=10,
height=12,
hcenter=False,
hright=False,
hleft=False
)
def __init__(self, **opts):
self.__dict__.update(ColumnDef.colDefaults)
super().__init__(**opts)
class Sheet:
"""A single sheet to be filled with tables"""
def __init__(self, name, cols, titlerows=0):
desktop = XSCRIPTCONTEXT.getDesktop()
# Create a new calc and use its first sheet
self.calc = desktop.loadComponentFromURL(
"private:factory/scalc", "_blank", 0, ()
)
self.sheet = self.calc.Sheets.getByIndex(0)
self.sheet.Name = name
self.cols = cols
self.titlerows = titlerows
self.currencyformat = self.calc.NumberFormats.getStandardFormat(
uno.getConstantByName("com.sun.star.util.NumberFormat.CURRENCY"),
Locale('de', 'DE', '')
)
self.Linestyle = uno.createUnoStruct("com.sun.star.table.BorderLine2")
self.Linestyle.OuterLineWidth = 5
self.ColDefs = []
self.Boldface = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
# Get the default cell style
# and use it to set use a 12pt Font Size by default
cs = self.calc.StyleFamilies.CellStyles.getByName('Default')
cs.CharHeight = 12
def addColumns(self, cols):
self.ColDefs += cols
def getCell(self, x, y):
return self.sheet.getCellByPosition(x, y)
def getMergeCell(self, x, y):
r = self.sheet.getCellRangeByPosition(x, y, x, y + 1)
r.merge(True)
return self.sheet.getCellByPosition(x, y)
def getCol(self, col):
return self.sheet.getColumns().getByIndex(col)
def getRow(self, row):
return self.sheet.getRows().getByIndex(row)
def styleBlock(self, x, y, n):
"""Style a row, Blocks with lines everywhere.
"""
cells = self.sheet.getCellRangeByPosition(x, y, x + n - 1, y)
cells.LeftBorder = self.Linestyle
cells.RightBorder = self.Linestyle
cells.TopBorder = self.Linestyle
cells.BottomBorder = self.Linestyle
cells.ParaRightMargin = 100
cells.ParaLeftMargin = 100
def styleAltGrey(self, x, y, n):
"""Style a row, Alternating grey background
"""
self.getCell(x, y).LeftBorder = self.Linestyle
self.getCell(x + n - 1, y).RightBorder = self.Linestyle
if (y & 1) == 1:
cells = self.sheet.getCellRangeByPosition(
x, y, x + n - 1, y
)
cells.CellBackColor = 0xdddddd
def addData(self, *lists, style='Block'):
mysheet = self
class Cellpos:
def __init__(self, cols, rows):
self.x = 0
self.y = mysheet.titlerows
self.cols = cols
self.rows = rows
def advance(self):
# go one down
self.y += 1
# if at bottom row, go to top and left
if self.y == self.rows + mysheet.titlerows:
self.x = self.x + self.cols + 1
self.y = mysheet.titlerows
# N is sum of list members
N = 0
for list in lists: N += len(list)
# colCols is the number of columns in each list. All lists
# are supposed to have the same number of columns.
self.colCols = max(len(ll[0]) if len(ll) > 0 else 0 for ll in lists)
if self.colCols == 0:
raise ValueError('All lists are empty')
self.HeaderPositions = []
self.totalCols = self.cols * (self.colCols + 1) - 1
# Each list starts with a Label, using a single row
# then one row for each member and another row to separate
        # the list from the next one. The total number of rows
# is TR = <number of lists> * 2 - 1 + <sum of list lengths>
needed = len(lists) * 2 - 1 + N
# We want to divide these equally over all columns,
# so we round up to the next multiple of cols and
# get the actual number of sheet rows
self.totalRows = (needed + self.cols-1) // self.cols
rest = self.totalRows * self.cols - needed
pos = Cellpos(self.colCols, self.totalRows)
styler = getattr(self, 'style'+style)
for list in lists:
self.HeaderPositions.append(Pos(pos.x, pos.y))
# advance once, to get room for the label
if len(lists) > 1:
pos.advance()
for row in list:
for i, val in enumerate(row):
cell = self.getCell(pos.x + i, pos.y)
if isinstance(val, numbers.Number) and val < 2000000000:
cell.Value = val
else:
cell.String = val
if isinstance(val, float):
cell.NumberFormat = self.currencyformat
styler(pos.x, pos.y, self.colCols)
pos.advance()
# advance once at the end of a list
pos.advance()
if rest > 0:
pos.advance()
rest -= 1
def addPagelistrow(self, row):
cell = self.getMergeCell(0, self.crow)
cell.String = row[0]
cell = self.getMergeCell(1, self.crow)
cell.String = row[1]
cell = self.getCell(2, self.crow)
cell.String = row[2]
cell = self.getCell(2, self.crow+1)
cell.String = row[3]
cell = self.getMergeCell(3, self.crow)
cell.Value = row[4]
cell.NumberFormat = self.currencyformat
cell = self.getMergeCell(4, self.crow)
cell.Value = row[5]
cell.NumberFormat = self.currencyformat
self.crow += 2
def addPagelist(self, *lists, style='Block', hstretch=1.2):
"""Add a single page list in fixed layout
        Solely used by Waagenlisten, which produces several pages,
one for each location.
"""
self.crow = self.titlerows
self.colCols = len(lists[0][0])
self.HeaderPositions = []
self.totalCols = self.colCols
styler = getattr(self, 'style'+style)
for list in lists:
if self.crow > self.titlerows:
self.getRow(self.crow).IsStartOfNewPage = True
for row in list:
self.addPagelistrow(row)
styler(0, self.crow-2, self.colCols-1)
styler(0, self.crow-1, self.colCols-1)
def getOptimalScale(self, header=False):
"""Calculate the optimal scale factor in percent
"""
w = 0
for i in range(self.totalCols):
w += self.getCol(i).Width
h = 0
for i in range(self.totalRows):
h += self.getRow(i).Height
if h == 0 or w == 0: return 100 # should not happen
ws = 19500 / w # factor to scale to 195mm width
hs = 28200 / h # factor to scale to 270mm height
# We must use the smaller of the two for scaling.
# If hs is smaller, the resulting height is at the maximum,
# and we only might make the Columns a bit wider, but we don't
if hs < ws: return int(hs * 100)
# If ws is smaller, the resulting width is at the maximum.
# In that case we can still make each row a bit higher to increase
# readability
hstretch = 28200 / (h * ws)
if hstretch > 1.5: hstretch = 1.5
for i in range(self.totalRows):
self.getRow(i).Height = self.getRow(i).Height * hstretch
return int(ws * 100)
def getOptimalScaleExt(self, landscape, pages, header=False):
nrows = (self.totalRows + pages-1) // pages
w = 0
for i in range(self.totalCols):
w += self.getCol(i).Width
h = 0
for i in range(nrows):
h += self.getRow(i+self.titlerows).Height
for i in range(self.titlerows):
h += self.getRow(i).Height
if h == 0 or w == 0: return 100 # should not happen
if landscape:
towidth = 28400
toheight = 19800
else:
towidth = 19800
toheight = 28400
if header:
toheight -= 900
ws = towidth / w
hs = toheight / h
if hs < ws: return int(hs * 100)
hstretch = toheight / (h * ws)
if hstretch > 1.8: hstretch = 1.8
for i in range(self.titlerows, self.totalRows):
self.getRow(i).Height = self.getRow(i).Height * hstretch
return int(ws * 100)
def pieceMarker(self, x, y):
cell = self.getCell(x, y)
if len(cell.String) == 2 and cell.String != 'Kg':
# cell.CellBackColor = 0xdddddd
cell.CharWeight = self.Boldface
def pieceMarkCol(self, col):
for i in range(self.totalRows):
self.pieceMarker(col, i)
def formatCol(self, i, cdef):
col = self.getCol(i)
if cdef.tryOptWidth:
col.OptimalWidth = True
if col.Width > cdef.width * 100:
col.Width = cdef.width * 100
else:
col.Width = cdef.width * 100
if cdef.bold:
col.CharWeight = self.Boldface
if cdef.greyUnit:
self.pieceMarkCol(i)
if cdef.height != 12:
col.CharHeight = cdef.height
if cdef.hright:
col.HoriJustify = horRight
if cdef.hleft:
col.HoriJustify = horLeft
col.VertJustify = vertCenter
def formatColumns(self):
for t in range(self.cols):
for i, cdef in enumerate(self.ColDefs):
self.formatCol(t * (self.colCols + 1) + i, cdef)
if t < self.cols-1:
self.getCol((t+1) * (self.colCols + 1) - 1).Width = 800
def setListLabels(self, *labels, cheight=14):
for i, l in enumerate(labels):
p = self.HeaderPositions[i]
cell = self.getCell(p.x + 1, p.y)
cell.String = l
cell.CharHeight = cheight
cell.CharWeight = self.Boldface
def setPageStyle(self, landscape=False, maxscale=True, pages=1, date=False):
defp = self.calc.StyleFamilies.PageStyles.getByName("Default")
defp.LeftMargin = 500
defp.TopMargin = 500
defp.BottomMargin = 500
defp.RightMargin = 500
defp.HeaderIsOn = False
defp.FooterIsOn = False
defp.CenterHorizontally = True
defp.CenterVertically = False
if landscape:
defp.Width = 29700
defp.Height = 21000
defp.IsLandscape = True
if date:
defp.HeaderIsOn = True
hs = defp.RightPageHeaderContent
hs.LeftText.String = datetime.date.today().strftime('%d.%m.%Y')
hs.CenterText.String = ''
defp.RightPageHeaderContent = hs
if maxscale:
if landscape or pages > 1:
defp.PageScale = self.getOptimalScaleExt(landscape, pages, header=date)
else:
defp.PageScale = self.getOptimalScale(header=date)
def setHeaderRow(self, titles):
self.sheet.setTitleRows(CellRangeAddress(StartRow=0, EndRow=0))
for i in range(self.cols):
for title in titles:
pos = title[0]
cdef = title[2]
cell = self.getCell(i * (self.colCols + 1) + pos, 0)
cell.String = title[1]
if cdef.bold:
cell.CharWeight = self.Boldface
if cdef.height != 12:
cell.CharHeight = cdef.height
if cdef.hcenter:
cell.HoriJustify = horCenter
class WaagenlistenQuery(Query):
Cols = ["EAN", "Bezeichnung", "Land", "VKEinheit", "VK1", "VK0"]
SCols = "SSSSDD"
CONDS = [
"Waage = 'A'",
"WG IN ('0001', '0003')"
]
def Waagenlisten(*args):
"""
Location based lists
For each of the 7 locations create a landscape formatted
page with large items, all on one sheet with page breaks
ready to print.
These lists will be placed at the various places where
fruits and vegetables can be found.
"""
locs = [
'Apfel', 'Kartoffel', 'Knoblauch', 'kühl links', 'kühl rechts',
'Pilze', 'Zitrone', 'Zwiebel'
]
lists = []
for loc in locs:
# Obtain list for location
L = WaagenlistenQuery(iwg=loc).run()
# Use consistent capitalization for the unit
for r in L: r[3] = r[3].capitalize()
lists.append(L)
sheet = Sheet('Waagenliste', 1, titlerows=1)
sheet.addPagelist(*lists)
sheet.addColumns([
ColumnDef(height=24, width=18, bold=True, hleft=True),
ColumnDef(height=29, width=100, bold=True),
ColumnDef(width=8),
ColumnDef(height=22, width=35),
ColumnDef(height=22, width=35),
])
sheet.formatColumns()
sheet.setHeaderRow([
[2, '', ColumnDef(hcenter=True, height=9)],
[3, 'Mitglieder', ColumnDef(hcenter=True, height=10, bold=True)],
[4, 'Nichtmitglieder', ColumnDef(hcenter=True, height=10, bold=True)]
])
sheet.setPageStyle(maxscale=False, date=True)
class WaageQuery(Query):
Cols = ["EAN", "Bezeichnung", "Land", "VK1", "VK0", "VKEinheit"]
SCols = "SSSDDS"
CONDS = ["Waage = 'A'"]
def Waagenliste(*args):
"""Lists for the electronic balances
Create a ready to print spreadsheet for the
electronic balances, containing the EAN numbers,
the names and the unit
The list is in landscape format and fitted to two pages.
"""
# Obtain lists from DB via sql query
listGemuese = WaageQuery(wg='0001').run()
listObst = WaageQuery(wg='0003').run()
    # Use a consistent capitalization for the unit
for r in listGemuese: r[5] = r[5].capitalize()
for r in listObst: r[5] = r[5].capitalize()
sheet = Sheet('Waagenliste', 2, titlerows=1)
sheet.addData(listGemuese, listObst, style='AltGrey')
sheet.addColumns([
ColumnDef(height=13, width=10, bold=True, hleft=True),
ColumnDef(height=13, width=57, bold=True, tryOptWidth=True),
ColumnDef(width=7),
ColumnDef(height=14, width=21),
ColumnDef(height=14, width=21),
ColumnDef(width=8, greyUnit=True, hright=True)
])
sheet.formatColumns()
sheet.setListLabels("Gemüse", "Obst", cheight=15)
sheet.setHeaderRow([
[2, 'Land', ColumnDef(hcenter=True, height=9)],
[3, 'Mitglieder', ColumnDef(hcenter=True, height=10, bold=True)],
[4, 'Nicht-\nmitglieder', ColumnDef(hcenter=True, height=10, bold=True)]
])
sheet.setPageStyle(landscape=True, pages=2, date=True)
return None
class WaagenupQuery(Query):
Cols = ["EAN", "Bezeichnung", "VK1", "VKEinheit"]
SCols = "SSDS"
CONDS = ["Waage = 'A'"]
def WaagenlisteUp(*args):
"""Lists for the electronic balances
Create a ready to print spreadsheet for the
electronic balances, containing the EAN numbers,
the names and the unit.
The list is in portrait format and fitted onto a single page.
"""
# Obtain lists from DB via sql query
listGemuese = WaagenupQuery(wg='0001').run()
listObst = WaagenupQuery(wg='0003').run()
    # Use a consistent capitalization for the unit
for r in listGemuese: r[3] = r[3].capitalize()
for r in listObst: r[3] = r[3].capitalize()
sheet = Sheet('Waagenliste', 2)
sheet.addData(listGemuese, listObst)
sheet.addColumns([
ColumnDef(width=10, bold=True),
ColumnDef(width=50, tryOptWidth=True),
ColumnDef(width=17),
ColumnDef(width=10, greyUnit=True)
])
sheet.formatColumns()
sheet.setListLabels("Gemüse", "Obst")
sheet.setPageStyle()
return None
class SchrankQuery(Query):
Cols = ["EAN", "Bezeichnung", "Land", "VK1", "VK0", "LiefID"]
SCols = 'SSSDDS'
def SchranklisteKuehl1(*args):
"""Lists for the Refridgerators"""
listKuehl1 = SchrankQuery(iwg='1Mopro').run()
sheet = Sheet('Kühlschrankliste1', 1, titlerows=1)
sheet.addData(listKuehl1)
sheet.setHeaderRow([
[0, 'EAN', ColumnDef(bold=True, hcenter=True)],
[1, 'Bezeichnung', ColumnDef()],
[2, 'Land', ColumnDef()],
[3, 'Mitglieder', ColumnDef(hcenter=True, height=10, bold=True)],
[4, 'Nicht-\nmitglieder', ColumnDef(hcenter=True, height=10, bold=True)],
[5, 'Hersteller', ColumnDef(hcenter=True)]
])
sheet.addColumns([
ColumnDef(width=35, bold=True),
ColumnDef(width=90),
ColumnDef(width=10),
ColumnDef(width=25),
ColumnDef(width=25),
ColumnDef(width=30)
])
sheet.formatColumns()
sheet.setPageStyle()
return None
class KassenlandQuery(Query):
Cols = ["EAN", "Bezeichnung", "Land", "VKEinheit", "VK1", "VK0"]
SCols = "SSSSDD"
CONDS = ["Waage = 'A'"]
def KassenlisteGemuese(*args):
# Obtain lists from DB via sql query
listGemuese = KassenlandQuery(wg='0001').run()
listObst = KassenlandQuery(wg='0003').run()
    # Use a consistent capitalization for the unit
for r in listGemuese: r[3] = r[3].capitalize()
for r in listObst: r[3] = r[3].capitalize()
sheet = Sheet('Kassenliste', 2)
sheet.addData(listGemuese, listObst)
sheet.addColumns([
ColumnDef(width=10, bold=True), # EAN
ColumnDef(width=50, tryOptWidth=True), # Bezeichnung
ColumnDef(width=8), # Land
ColumnDef(width=8, greyUnit=True), # VKEinheit
ColumnDef(width=17), # Preis Mitglieder
ColumnDef(width=17) # Preis Andere
])
sheet.formatColumns()
sheet.setListLabels("Gemüse", "Obst")
sheet.setPageStyle()
return None
class KassenQuery(Query):
Cols = ["EAN", "Bezeichnung", "VKEinheit", "VK1", "VK0"]
SCols = "SSSDD"
def KassenlisteBrot(name, id):
# Obtain lists from DB via sql query
lst1 = KassenQuery(wg='0020', liefer=id).run()
lst2 = KassenQuery(wg='0025', liefer=id).run()
    # Use a consistent capitalization for the unit
for r in lst1: r[2] = r[2].capitalize()
for r in lst2: r[2] = r[2].capitalize()
sheet = Sheet('KassenlisteBrot'+id, 2)
sheet.addData(lst1, lst2)
sheet.addColumns([
ColumnDef(width=15, bold=True), # EAN
ColumnDef(width=50, tryOptWidth=True), # Bezeichnung
ColumnDef(width=12, greyUnit=True), # VKEinheit
ColumnDef(width=14, height=10), # Preis Mitglieder
ColumnDef(width=14, height=10) # Preis Andere
])
sheet.formatColumns()
sheet.setListLabels(name + ' Brot', name + ' Kleingebäck')
sheet.setPageStyle()
return None
def KassenlisteBrotS(*args):
return KassenlisteBrot('Schäfer', 'SCHÄFERBROT')
def KassenlisteBrotW(*args):
return KassenlisteBrot('Weber', 'WEBER')
def KassenlisteFleisch(name, id):
lst = KassenQuery(wg='0090', liefer=id).run()
for r in lst: r[2] = r[2].capitalize()
sheet = Sheet('KassenlisteFleisch'+name, 2)
sheet.addData(lst)
sheet.addColumns([
ColumnDef(width=15, bold=True), # EAN
ColumnDef(width=50, tryOptWidth=True), # Bezeichnung
ColumnDef(width=12, greyUnit=True), # VKEinheit
ColumnDef(width=14, height=10), # Preis Mitglieder
ColumnDef(width=14, height=10) # Preis Andere
])
sheet.formatColumns()
sheet.setListLabels('Fleisch ' + name)
sheet.setPageStyle()
return None
def KassenlisteFleischFau(*args):
return KassenlisteFleisch('Fauser', 'FAUSER')
def KassenlisteFleischUnt(*args):
return KassenlisteFleisch('Unterweger', 'UNTERWEGER')
def KassenlisteFleischUri(*args):
return KassenlisteFleisch('Uria', 'URIA')
def KassenlisteLoseWare(*args):
lst1 = KassenQuery(wg='0585').run()
lst2 = KassenQuery(wg='0590').run()
lst3 = KassenQuery(iwg='HH', wg='0400').run()
lst4 = KassenQuery(iwg='HH', wg=['0070', '0200', '0280', '0340']).run()
lst5 = KassenQuery(iwg='HH', wg=['0020', '0025', '0060']).run()
for r in lst1: r[2] = r[2].capitalize()
for r in lst2: r[2] = r[2].capitalize()
for r in lst3: r[2] = r[2].capitalize()
for r in lst4: r[2] = r[2].capitalize()
for r in lst5: r[2] = r[2].capitalize()
sheet = Sheet('KassenlisteLoseWare', 2)
sheet.addData(lst1, lst2, lst3, lst4, lst5)
sheet.addColumns([
ColumnDef(width=32, bold=True), # EAN
ColumnDef(width=50, tryOptWidth=True), # Bezeichnung
ColumnDef(width=12, greyUnit=True), # VKEinheit
ColumnDef(width=16, height=10), # Preis Mitglieder
ColumnDef(width=16, height=10) # Preis Andere
])
sheet.formatColumns()
sheet.setListLabels(
'Lose Lebensmittel', 'Lose Waschmittel',
'Säfte', '5 Elemente', 'Tennental'
)
sheet.setPageStyle()
return None
# Only export the public functions as macros
g_exportedScripts = [
KassenlisteBrotS,
KassenlisteBrotW,
KassenlisteFleischFau,
KassenlisteFleischUnt,
KassenlisteFleischUri,
KassenlisteGemuese,
KassenlisteLoseWare,
SchranklisteKuehl1,
Waagenliste,
WaagenlisteUp,
Waagenlisten
]
|
nilsrennebarth/oodbpyges
|
Psmacros.py
|
Psmacros.py
|
py
| 21,881 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "uno.getConstantByName",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "com.sun.star.lang.Locale",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "uno.createUnoStruct",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "uno.getConstantByName",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numbers.Number",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "com.sun.star.table.CellHoriJustify.RIGHT",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "com.sun.star.table.CellHoriJustify.LEFT",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "com.sun.star.table.CellVertJustify.CENTER",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "com.sun.star.table.CellRangeAddress",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "com.sun.star.table.CellHoriJustify.CENTER",
"line_number": 437,
"usage_type": "name"
}
] |
27618901256
|
from django.contrib import admin
from home.models import Setting, ContactFormMessage
class ContactForMessageAdmin(admin.ModelAdmin):
list_display = ["name","email","subject","note","status"]
list_filter = ["status"]
# Register your models here.
admin.site.register(ContactFormMessage,ContactForMessageAdmin)
admin.site.register(Setting)
|
mfatihyarar/B200109020_proje
|
home/admin.py
|
admin.py
|
py
| 348 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "home.models.ContactFormMessage",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "home.models.Setting",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
}
] |
7191980185
|
from django.shortcuts import render, redirect
from .models import article
# Create your views here.
def index_defined_in_view(request):
articles = article.objects.all()
new_article = []
for row in articles:
if(len(row.title)>5):
new_article.append(row)
return render(request, 'index.html', {'articles': new_article})
def detail_defined_in_view(request, num_title):
contents = article.objects.get(pk=num_title)
return render(request, 'detail.html', {'contents': contents.content})
def new_defined_in_view(request):
if request.method == 'POST':
print(request.POST)
new_article = article.objects.create(
title=request.POST['title'],
content=request.POST['content'],
)
return redirect('detail', num_title=new_article.pk)
else:
return render(request, 'new.html')
|
dooking/LikeLion
|
session8/blog/write/views.py
|
views.py
|
py
| 879 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.article.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.article.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.article",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.article.objects.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.article.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.article",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.article.objects.create",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.article.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "models.article",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
}
] |
26767459770
|
from statistics import mode
import pytorch_lightning as pl
from torch.nn import functional as F
from torch import optim
from transformers import AutoModelForSequenceClassification
import torch
import pandas as pd
import numpy as np
from prediction_stats import print_stats
trans_cache_dir = "/cluster/scratch/gboeshertz/huggingface_cache"
class BaseModule(pl.LightningModule):
def __init__(self, config,class_weights=None):
super().__init__()
self.save_hyperparameters()
self.config = config
self.model = AutoModelForSequenceClassification.from_pretrained(config['model_name'], num_labels=2,
ignore_mismatched_sizes=True,cache_dir =trans_cache_dir)
self.test_list_logits = []
self.test_list_labels = []
        self.class_weights = None
        if class_weights is not None:
            self.class_weights = torch.from_numpy(class_weights).float()
def load_ckpt(self,path):
model_dict = torch.load(path,map_location= "cuda" if torch.cuda.is_available() else "cpu" )['state_dict']
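        # strip the Lightning "model." prefix so the keys match the bare transformer's state_dict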
model_dict = {k.replace('model.',''):v for k,v in model_dict.items() if 'model' in k}
#if not torch.cuda.is_available():
# for k, v in model_dict.items():
# model_dict[k] = v.cpu()
self.model.load_state_dict(model_dict)
def forward(self, x):
        # x should be a dictionary with at least a key input_ids
return self.model(x).logits
def get_loss(self,preds,y,weights=None):
return F.cross_entropy(preds.logits,y,weight=weights)
def predict_sentence(self, tokens,masks=None,segs = None):
with torch.no_grad():
tokens = tokens.to(self.model.device)
if segs is not None:
segs = segs.to(self.model.device)
if masks is not None:
masks = masks.to(self.model.device)
output = self.model(tokens, token_type_ids=segs, attention_mask=masks)
pred = output.logits.argmax(axis=1).item()
return pred
def batch_step(self,batch):
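        # One forward pass: with use_hypothesis the batch holds (pair tokens, mask ids, segment ids, labels),
        # otherwise (premise tokens, mask ids, labels)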
if self.config["use_hypothesis"]:
pair_token_ids, mask_ids, seg_ids, y = batch
pair_token_ids = pair_token_ids.to(self.model.device)
mask_ids = mask_ids.to(self.model.device)
seg_ids = seg_ids.to(self.model.device)
labels = y.to(self.model.device)
output = self.model(pair_token_ids,
token_type_ids=seg_ids,
attention_mask=mask_ids,
labels=labels)
else:
premise_ids, mask_ids, y = batch
premise_ids = premise_ids.to(self.model.device)
mask_ids = mask_ids.to(self.model.device)
labels = y.to(self.model.device)
output = self.model(premise_ids,
attention_mask=mask_ids,
labels=labels)
return output
def training_step(self, batch, batch_idx):
if self.class_weights is not None:
self.class_weights = self.class_weights.to(self.model.device)
output = self.batch_step(batch)
#accuracy = (output.logits.argmax(axis=0) == y).mean()
y = batch[-1].to(self.model.device)
loss = self.get_loss(output,y,self.class_weights)
accuracy = (output.logits.argmax(axis=1) == y).float().mean().item()
self.log("train_acc", accuracy,on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("train_loss", loss,on_step=True, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
if self.class_weights is not None:
self.class_weights = self.class_weights.to(self.model.device)
output = self.batch_step(batch)
y = batch[-1].to(self.model.device)
loss = self.get_loss(output,y,self.class_weights)
accuracy = (output.logits.argmax(axis=1) == y).float().mean().item()
self.log("val_acc", accuracy,on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("val_loss", loss,on_step=True, on_epoch=True, prog_bar=True, logger=True)
def test_step(self, batch, batch_idx):
logits = self.batch_step(batch).logits
self.test_list_logits.append(logits)
self.test_list_labels.append(batch[-1].to(self.model.device))
def test_epoch_end(self, outputs):
test_outputs = torch.vstack(self.test_list_logits).cpu().numpy()
test_labels = torch.vstack(self.test_list_labels).cpu().numpy()[:,0]
test_outputs = test_outputs.argmax(axis=1)
preds_labels = np.vstack((test_outputs,test_labels))
with open('test_outputs/'+ self.config["test_out_path"] + '.npy', 'wb') as f:
np.save(f, preds_labels)
print_stats(preds_labels,self.config["test_out_path"])
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.config['lr'])
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[1, 2], gamma=0.1)
return [optimizer], [lr_scheduler]
|
gauthierboeshertz/fallacy_detection
|
base_module.py
|
base_module.py
|
py
| 5,254 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "transformers.AutoModelForSequenceClassification.from_pretrained",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForSequenceClassification",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.vstack",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.vstack",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "prediction_stats.print_stats",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.optim.AdamW",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.MultiStepLR",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 131,
"usage_type": "name"
}
] |
42947667200
|
from PyQt4 import QtGui, QtCore
from twisted.internet.defer import inlineCallbacks, returnValue
import socket
import os
from barium.lib.clients.gui.piezo_mirror_gui import QPiezoMirrorGui
from config.multiplexerclient_config import multiplexer_config
#from labrad.units import WithUnit as U
SIGNALID1 = 445571
SIGNALID2 = 445572
SIGNALID3 = 445573
SIGNALID4 = 445574
class Piezomirror_client(QtGui.QWidget):
def __init__(self, reactor, parent=None):
super(Piezomirror_client, self).__init__()
self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
print("b")
self.reactor = reactor
self.channel = {}
self.channel_GUIs = {}
self.connect()
@inlineCallbacks
def connect(self):
"""Creates an Asynchronous connection to the wavemeter computer and
connects incoming signals to relavent functions (((which computer???)))
"""
from labrad.wrappers import connectAsync
self.password = os.environ['LABRADPASSWORD']
self.cxn = yield connectAsync('localhost', name = socket.gethostname()\
+ 'Piezo_Mirror Gui', password=self.password)
self.reg = self.cxn.registry
self.server = yield self.cxn.piezo_controller
#self.set_up_channels()
self.initializeGUI()
@inlineCallbacks
def initializeGUI(self):
layout = QtGui.QGridLayout()
qBox = QtGui.QGroupBox('Piezo Mirror Gui')
subLayout = QtGui.QGridLayout()
qBox.setLayout(subLayout)
        layout.addWidget(qBox, 0, 0)
#yield self.reg.cd(['Clients','Fiber Switch Client'])
#self.channel_list = yield self.reg.get('Channels')
self.gui = QPiezoMirrorGui()
#init_chan = yield self.server.get_channel()
#self.channel.displayChannel.setNum(int(init_chan))
'''
for now channels labels are stored in the registry as
a list of 2-element arrays, i.e.,
[['laser 1', channel num], ['laser 2', chan num], ...]
stored in "registry/Clients/Fiber Switch Client"
'''
## stateA = yield self.is_rf_A_on()
## print(stateA)
## if stateA:
## print("if")
## self.gui.rf_switch.setDown(True)
## stateB = yield self.is_rf_B_on()
## if stateB:
## self.gui.rf_switch2.setDown(True)
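        # Wire each voltage spin box to its DAC channel and each switch button to that channel's output state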
self.gui.SpinVoltage.valueChanged.connect(lambda Voltage1 = self.gui.SpinVoltage.value(),\
: self.set_dac_voltage(1,Voltage1))
self.gui.SpinVoltage2.valueChanged.connect(lambda Voltage2 = self.gui.SpinVoltage2.value(),\
: self.set_dac_voltage(2,Voltage2))
self.gui.SpinVoltage3.valueChanged.connect(lambda Voltage3 = self.gui.SpinVoltage3.value(),\
: self.set_dac_voltage(3,Voltage3))
self.gui.SpinVoltage4.valueChanged.connect(lambda Voltage4 = self.gui.SpinVoltage4.value(),\
: self.set_dac_voltage(4,Voltage4))
self.gui.volt_switch.clicked.connect(lambda state1 = self.gui.volt_switch.isDown(),\
: self.set_state(1,state1))
self.gui.volt_switch2.clicked.connect(lambda state2 = self.gui.volt_switch2.isDown(),\
: self.set_state(2,state2))
self.gui.volt_switch3.clicked.connect(lambda state3 = self.gui.volt_switch3.isDown(),\
: self.set_state(3,state3))
self.gui.volt_switch4.clicked.connect(lambda state4 = self.gui.volt_switch4.isDown(),\
: self.set_state(4,state4))
## self.channel.checkChannel.clicked.connect(lambda: self.refreshNum())
##
##
### print(channel1[0])
## self.channel.c1label.setText(str(self.channel_list[0][0]) + ' nm')
## self.channel.c2label.setText(str(self.channel_list[1][0]) + ' nm')
## self.channel.c3label.setText(str(self.channel_list[2][0]) + ' nm')
## self.channel.c4label.setText(str(self.channel_list[3][0]) + ' nm')
## self.channel.c5label.setText(str(self.channel_list[4][0]) + ' nm')
## self.channel.c6label.setText(str(self.channel_list[5][0]) + ' nm')
## self.channel.c7label.setText(str(self.channel_list[6][0]) + ' nm')
## self.channel.c8label.setText(str(self.channel_list[7][0]) + ' nm')
##
##
## #self.channel_GUIs[chan] = laser
subLayout.addWidget(self.gui, 1, 1)
layout.minimumSize()
self.setLayout(layout)
@inlineCallbacks
def set_state(self, chan,value):
yield self.server.set_output_state(chan,value)
@inlineCallbacks
def set_dac_voltage(self, chan, voltage):
#self.lasers[chan][7] = voltage
yield self.server.set_dac_voltage(chan,voltage)
## @inlineCallbacks
## def rf_freq_A(self, num):
## yield self.server.set_channel(0)
## yield self.server.set_freq(num)
## @inlineCallbacks
## def rf_freq_B(self, num):
## yield self.server.set_channel(1)
## yield self.server.set_freq(num)
##
## @inlineCallbacks
## def rf_output_A(self,state):
## if state:
## yield self.server.set_channel(0)
## yield self.server.turn_on_rf()
## else:
## yield self.server.set_channel(0)
## yield self.server.turn_off_rf()
## @inlineCallbacks
## def rf_output_B(self,state):
## if state:
## yield self.server.set_channel(1)
## yield self.server.turn_on_rf()
## else:
## yield self.server.set_channel(1)
## yield self.server.turn_off_rf()
##
## @inlineCallbacks
## def is_rf_A_on(self):
## yield self.server.set_channel(0)
## state= yield self.server.is_rf_on()
## returnValue(state)
##
## @inlineCallbacks
## def is_rf_B_on(self):
## yield self.server.set_channel(1)
## state= yield self.server.is_rf_on()
## returnValue(state)
## @inlineCallbacks
## def rf_off_A(self):
## yield self.server.set_channel("0")
## yield self.server.turn_off_rf()
## @inlineCallbacks
## def rf_off_B(self):
## yield self.server.set_channel("1")
## yield self.server.turn_off_rf()
if __name__ == "__main__":
b = QtGui.QApplication( [] )
print("a")
import qt4reactor
print("C")
qt4reactor.install()
from twisted.internet import reactor
piezo_client = Piezomirror_client(reactor)
piezo_client.show()
reactor.run()
|
barium-project/barium
|
lib/clients/Piezo_mirror_client/Piezo_mirror_client.py
|
Piezo_mirror_client.py
|
py
| 6,846 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QSizePolicy",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "labrad.wrappers.connectAsync",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QGridLayout",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QGroupBox",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QGridLayout",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.returnValue",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "barium.lib.clients.gui.piezo_mirror_gui.QPiezoMirrorGui",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "twisted.internet.defer.inlineCallbacks",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QApplication",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "qt4reactor.install",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "{'connectAsync': 'labrad.wrappers.connectAsync'}",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 191,
"usage_type": "argument"
},
{
"api_name": "twisted.internet.reactor.run",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 193,
"usage_type": "name"
}
] |
70396798267
|
import chex
import numpy.testing as npt
import pytest
from shinrl import Pendulum
@pytest.fixture
def setUp():
config = Pendulum.DefaultConfig(dA=5)
return config
def test_to_discrete_act(setUp):
from shinrl.envs.pendulum.calc import to_discrete_act
config = setUp
act = to_discrete_act(config, -0.4)
# jit testing
config10 = Pendulum.DefaultConfig(dA=50)
act = to_discrete_act(config10, -0.4)
def test_to_continuous_act(setUp):
from shinrl.envs.pendulum.calc import to_continuous_act
config = setUp
act = to_continuous_act(config, 2)
def test_state_to_th_vel(setUp):
from shinrl.envs.pendulum.calc import state_to_th_vel
config = setUp
th, vel = state_to_th_vel(config, 1)
npt.assert_allclose(th, -2.938909)
npt.assert_allclose(vel, -8)
def test_th_vel_to_state(setUp):
from shinrl.envs.pendulum.calc import th_vel_to_state
config = setUp
state = th_vel_to_state(config, -2.938909, -8)
assert state == 1
def test_transition(setUp):
from shinrl.envs.pendulum.calc import transition
config = setUp
next_state, probs = transition(config, 1, 2)
chex.assert_shape(next_state, (1,))
chex.assert_shape(probs, (1,))
def test_reward(setUp):
from shinrl.envs.pendulum.calc import reward
config = setUp
rew = reward(config, 1, 2)
npt.assert_allclose(rew, -15.0373, rtol=1e-3)
def test_observation():
from shinrl.envs.pendulum.calc import observation_tuple
config = Pendulum.DefaultConfig(obs_mode="tuple")
obs = observation_tuple(config, 1)
chex.assert_shape(obs, (3,))
from shinrl.envs.pendulum.calc import observation_image
config = Pendulum.DefaultConfig(obs_mode="image")
obs = observation_image(config, 1)
chex.assert_shape(obs, (28, 28, 1))
|
omron-sinicx/ShinRL
|
tests/envs/pendulum/pendulum_calc_test.py
|
pendulum_calc_test.py
|
py
| 1,812 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "shinrl.Pendulum.DefaultConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "shinrl.envs.pendulum.calc.to_discrete_act",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum.DefaultConfig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "shinrl.envs.pendulum.calc.to_discrete_act",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "shinrl.envs.pendulum.calc.to_continuous_act",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "shinrl.envs.pendulum.calc.state_to_th_vel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "shinrl.envs.pendulum.calc.th_vel_to_state",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "shinrl.envs.pendulum.calc.transition",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "chex.assert_shape",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "chex.assert_shape",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "shinrl.envs.pendulum.calc.reward",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "shinrl.Pendulum.DefaultConfig",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "shinrl.envs.pendulum.calc.observation_tuple",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "chex.assert_shape",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum.DefaultConfig",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "shinrl.Pendulum",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "shinrl.envs.pendulum.calc.observation_image",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "chex.assert_shape",
"line_number": 77,
"usage_type": "call"
}
] |
32508697252
|
import numpy as np
import torch
from tqdm import tqdm
import torch.distributed as dist
import libs.utils as utils
from trainers.abc import AbstractBaseTrainer
from utils.metrics import AverageMeterSet
from libs.utils.metrics import intersectionAndUnionGPU
from datasets.dataset_utils import get_label_2_train
class SingleTrainer(AbstractBaseTrainer):
def __init__(self, models, dataloaders, criterions, optimizers, lr_schedulers, num_epochs, train_loggers,
val_loggers, **kwargs):
print("Single Trainer")
super().__init__(models, dataloaders, criterions, optimizers, lr_schedulers, num_epochs,
train_loggers, val_loggers, **kwargs)
self.model = models['segmentation']
self.criterion = criterions
self.val_dataloaders = dataloaders['val']
self.dataset_name = list(self.val_dataloaders.keys())[0]
self.val_dataloader = self.val_dataloaders[self.dataset_name]
self.num_classes = kwargs['num_classes']
def train_one_epoch(self, epoch):
average_meter_set = AverageMeterSet()
dataloader_tqdm = tqdm(self.train_dataloader, desc="Epoch {}".format(epoch))
for batch_idx, (image, target, _) in enumerate(dataloader_tqdm):
image, target = image.cuda(non_blocking=True), target.cuda(non_blocking=True)
self._reset_grad()
outputs = self.model(image)[0]
loss = self.criterion(outputs, target)
loss.backward()
self._update_grad()
self._step_schedulers(batch_idx, epoch)
average_meter_set.update('train_loss', loss.item())
dataloader_tqdm.set_description('Train loss: %.3f' % average_meter_set['train_loss'].avg)
train_results = average_meter_set.averages()
return train_results
@torch.no_grad()
def validate(self, epoch):
total_correct, total_label, total_inter, total_union = 0, 0, 0, 0
pix_acc, miou = 0, 0
tbar = tqdm(self.val_dataloader, desc='\r')
for i, (image, target, _) in enumerate(tbar):
image, target = image.cuda(non_blocking=True), target.cuda(non_blocking=True)
preds = self.model(image)[0]
inter, union, _ = intersectionAndUnionGPU(preds.max(1)[1], target, K=self.num_classes)
if self.distributed_training:
dist.all_reduce(inter), dist.all_reduce(union)
inter, union = inter.cpu().numpy(), union.cpu().numpy()
total_inter += inter
total_union += union
IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
miou = IoU.mean()
tbar.set_description('pixAcc: %.3f, mIoU: %.3f' % (pix_acc, miou))
results = {'{}_pixAcc'.format(self.dataset_name): 0,
'{}_mIoU'.format(self.dataset_name): miou}
return results
|
numpee/UniSeg
|
trainers/single_trainer.py
|
single_trainer.py
|
py
| 2,887 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "trainers.abc.AbstractBaseTrainer",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "utils.metrics.AverageMeterSet",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "libs.utils.metrics.intersectionAndUnionGPU",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.distributed.all_reduce",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.spacing",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 45,
"usage_type": "call"
}
] |
31381062627
|
from faker import Faker
from app.db.dev import db
from app.db.models import Team
class TeamSeeder:
"""
Seeder class for generating team data.
"""
def __init__(self):
"""
Initialize the TeamSeeder class.
"""
self.fake = Faker()
def generate_teams(self, count):
"""
Generate a specified number of team records.
Args:
count (int): The number of team records to generate.
Returns:
list: A list of generated team records.
"""
for _ in range(count):
team = Team(name=self.fake.bs(),
image_url=self.fake.image_url())
db.session.add(team)
db.session.commit()
teams = Team.query.all()
return teams
@classmethod
def get_all_teams(cls):
"""
Queries for all team records
Returns:
list: A list of all team records.
"""
return Team.query.all()
@classmethod
def clear_teams(cls):
"""
Deletes all team records.
Returns:
int: Number of deleted team records.
"""
num_deleted = db.session.query(Team).delete()
db.session.commit()
return num_deleted
|
rajahwu/FpGp
|
project_prep/app/db/seeders/teams.py
|
teams.py
|
py
| 1,271 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "faker.Faker",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.db.models.Team",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "app.db.dev.db.session.add",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "app.db.dev.db.session",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "app.db.dev.db",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "app.db.dev.db.session.commit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "app.db.dev.db.session",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "app.db.dev.db",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "app.db.models.Team.query.all",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "app.db.models.Team.query",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "app.db.models.Team",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "app.db.models.Team.query.all",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "app.db.models.Team.query",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "app.db.models.Team",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "app.db.dev.db.session.query",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "app.db.models.Team",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "app.db.dev.db.session",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "app.db.dev.db",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "app.db.dev.db.session.commit",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "app.db.dev.db.session",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "app.db.dev.db",
"line_number": 53,
"usage_type": "name"
}
] |
6226428783
|
# -*- coding: utf-8 -*-
import os
from flask import request
from flask import jsonify
from flask import Flask, g
from flask import render_template
from flask.ext.babel import Babel
import PIL
import base64
import numpy as np
from PIL import Image
from io import BytesIO
from datetime import datetime
import tensorflow as tf
import tflearn
import cloudinary
import cloudinary.uploader
app = Flask(__name__)
babel = Babel(app)
cloudinary.config(
cloud_name=os.environ.get('CLOUDINARY_CLOUD_NAME'),
api_key=os.environ.get('CLOUDINARY_API_KEY'),
api_secret=os.environ.get('CLOUDINARY_API_SECRET')
)
def build_model():
tf.reset_default_graph()
net = tflearn.input_data([None, 784])
net = tflearn.fully_connected(net, 300, activation='ReLU')
net = tflearn.fully_connected(net, 100, activation='ReLU')
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.05, loss='categorical_crossentropy')
model = tflearn.DNN(net)
return model
# model creation and loading
model = build_model()
model.load(os.path.dirname(os.path.abspath(__file__)) + '/MNIST.tfl')
@app.route('/', methods=['GET'])
def home():
return render_template('home.html')
@app.route('/predict/', methods=['POST'])
def predict():
data = request.form['canvas']
data = base64.b64decode(data.replace('data:image/png;base64,', ''))
img = Image.open(BytesIO(data))
img = fill_background(img)
img = resize(img, 28)
X = do_array(img)
X = X.reshape(784)
try:
y = model.predict([X])
resp = get_answer(y)
except:
resp = None
save_image(img, resp)
return jsonify(resp)
def resize(img, width):
wpercent = (width / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((width, hsize), PIL.Image.ANTIALIAS)
return img
def do_array(img):
temp = img
temp = temp.convert('1')
A = np.array(temp)
new_A = np.empty((A.shape[0], A.shape[1]), None)
for i in range(len(A)):
for j in range(len(A[i])):
if A[i][j] == True:
new_A[i][j] = 0
else:
new_A[i][j] = 1
return new_A
def fill_background(image):
image.convert("RGBA")
pixel_data = image.load()
if image.mode == "RGBA":
for y in range(image.size[1]):
for x in range(image.size[0]):
if pixel_data[x, y][3] < 255:
pixel_data[x, y] = (255, 255, 255, 255)
return image
def get_answer(y):
best = max(y[0])
return y[0].index(best)
def save_image(img, name):
now = datetime.now().strftime('%Y%m%d%H%M%S')
name = '%s-%s.png' % (name, now)
path = '/tmp/%s' % name
img.save(path)
name = 'mnist/%s' % name
cloudinary.uploader.upload(path, public_id=name)
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(['pt', 'en'])
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
rafaelnovello/mnist-demo
|
webapp/app.py
|
app.py
|
py
| 3,112 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.ext.babel.Babel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cloudinary.config",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reset_default_graph",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tflearn.input_data",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tflearn.fully_connected",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tflearn.fully_connected",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tflearn.fully_connected",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tflearn.regression",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tflearn.DNN",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "base64.b64decode",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "cloudinary.uploader.upload",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cloudinary.uploader",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "flask.request.accept_languages.best_match",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "flask.request.accept_languages",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 131,
"usage_type": "attribute"
}
] |
21792958512
|
import cv2
import pytesseract
from wand.image import Image
from PyPDF2 import PdfFileReader
def ocr(image_path):
# Read the image
image = cv2.imread(image_path)
# Convert the image to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply a binarization algorithm to improve text recognition
threshold_image = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# Apply OCR to recognize text in the image
recognized_text = pytesseract.image_to_string(threshold_image, lang='eng')
return recognized_text
def convert_pdf_to_jpg(pdf_path, output_path):
# Read the PDF file
pdf = PdfFileReader(open(pdf_path, 'rb'))
# Iterate over each PDF page and save it as a JPG image
for page_number in range(pdf.numPages):
with Image(filename=f'{pdf_path}[{page_number}]') as image:
# Convert the image to JPG format
image.format = 'jpg'
# Save the image to disk
image.save(filename=f'{output_path}/{page_number}.jpg')
pdf_path = 'path/to/pdf.pdf'
output_directory = 'path/to/output/directory'
convert_pdf_to_jpg(pdf_path, output_directory)
image_path = 'path/to/image.jpg'
text = ocr(image_path)
print(text)
|
Vlad-Goncharov/info_file_detection
|
main.py
|
main.py
|
py
| 1,618 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wand.image.Image",
"line_number": 28,
"usage_type": "call"
}
] |