Dataset Viewer (First 5GB)

Columns: seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)

---

seq_id: 37530746951 | text:
from django.core.management.base import BaseCommand
from assessment.models.assessment_model import AssessmentType
class Command(BaseCommand):
help = 'Creates initial Assessment Types'
def handle(self, *args, **options):
# Creating 'Homework' AssessmentType
homework, created = AssessmentType.objects.get_or_create(
name='Homework'
)
if created:
self.stdout.write(self.style.SUCCESS('Successfully created Homework AssessmentType'))
else:
self.stdout.write(self.style.SUCCESS('Homework AssessmentType already exists'))
# Creating 'Test' AssessmentType
test, created = AssessmentType.objects.get_or_create(
name='Test'
)
if created:
self.stdout.write(self.style.SUCCESS('Successfully created Test AssessmentType'))
else:
self.stdout.write(self.style.SUCCESS('Test AssessmentType already exists'))
repo_name: markoco14/student-mgmt | sub_path: assessment/management/commands/create_assessment_types.py
file_name: create_assessment_types.py | file_ext: py | file_size_in_byte: 959
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects.get_or_create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects.get_or_create",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "assessment.models.assessment_model.AssessmentType",
"line_number": 19,
"usage_type": "name"
}
]

---

seq_id: 17609833661 | text:
# encoding: utf-8
from django.urls import reverse
from rest_framework import serializers
from mainsite.serializers import StripTagsCharField
from mainsite.utils import OriginSetting
class ExternalToolSerializerV1(serializers.Serializer):
name = StripTagsCharField(max_length=254)
client_id = StripTagsCharField(max_length=254)
slug = StripTagsCharField(max_length=255, source='entity_id', read_only=True)
def to_representation(self, instance):
representation = super(ExternalToolSerializerV1, self).to_representation(instance)
representation['launchpoints'] = {
lp.launchpoint: {
"url": "{}{}".format(OriginSetting.HTTP, reverse("v1_api_externaltools_launch", kwargs=dict(
launchpoint=lp.launchpoint,
slug=lp.cached_externaltool.entity_id
))),
"launch_url": lp.launch_url,
"label": lp.label,
"icon_url": lp.icon_url
} for lp in instance.cached_launchpoints()
}
return representation
class ExternalToolLaunchSerializerV1(serializers.Serializer):
launch_url = serializers.URLField()
def to_representation(self, instance):
representation = super(ExternalToolLaunchSerializerV1, self).to_representation(instance)
requesting_user = self.context['request'].user if 'request' in self.context else None
context_id = self.context.get('tool_launch_context_id', None)
representation['launch_data'] = instance.generate_launch_data(user=requesting_user, context_id=context_id)
return representation
repo_name: reedu-reengineering-education/badgr-server | sub_path: apps/externaltools/serializers_v1.py
file_name: serializers_v1.py | file_ext: py | file_size_in_byte: 1,636
program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
api:
[
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mainsite.serializers.StripTagsCharField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mainsite.utils.OriginSetting.HTTP",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mainsite.utils.OriginSetting",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.URLField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 33,
"usage_type": "name"
}
]

---

seq_id: 13131048154 | text:
import psycopg2
import datetime
import time
import sys
import requests
import scipy.io.wavfile
import json
import vokaturi.Vokaturi as Vokaturi
Vokaturi.load("./vokaturi/OpenVokaturi-3-3-linux64.so")
from ms_text_analysis import *
from cassandra_test import *
MSSp = MSSpeechToText()
MST = MSTextAnalysis()
MSAD = MSAnomalyDetector()
CApi = CassandraAPI()
def get_emotions(filename):
(sample_rate, samples) = scipy.io.wavfile.read(filename)
buffer_length = len(samples)
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:
c_buffer[:] = samples[:] / 32768.0
else:
c_buffer[:] = 0.5 * (samples[:, 0] + samples[:, 1]) / 32768.0
voice = Vokaturi.Voice(sample_rate, buffer_length)
voice.fill(buffer_length, c_buffer)
quality = Vokaturi.Quality()
emotionProbabilities = Vokaturi.EmotionProbabilities()
voice.extract(quality, emotionProbabilities)
voice.destroy()
if quality.valid:
return ("%.3f" % emotionProbabilities.neutrality,
"%.3f" % emotionProbabilities.happiness,
"%.3f" % emotionProbabilities.sadness,
"%.3f" % emotionProbabilities.anger,
"%.3f" % emotionProbabilities.fear)
else: raise RuntimeError('bad quality')
DB_NAME = "defaultdb"
USER = "avnadmin"
PASSWORD = "hq3fi662tthholn2"
HOST = "pg-2e774192-dimak24-5fb9.aivencloud.com"
PORT = "21756"
INFLUXDB_HOST = "influx-1ab60b47-dimak24-5fb9.aivencloud.com"
INFLUXDB_PORT = "21756"
INFLUXDB_DB_NAME = "defaultdb"
INFLUXDB_USER = "avnadmin"
INFLUXDB_PASSWORD = "e6gkm3n9bmvcbpfb"
def _execute_op(operation):
conn = psycopg2.connect(
database=DB_NAME,
user=USER,
password=PASSWORD,
host=HOST,
port=PORT)
cur = conn.cursor()
try:
res = operation(cur)
conn.commit()
return res
except psycopg2.Error as e:
print(e)
# raise
finally:
cur.close()
conn.close()
def _execute(*args):
_execute_op(lambda cur: cur.execute(*args))
def _execute_fetch(*args):
def _op(cur):
cur.execute(*args)
return cur.fetchall()
return _execute_op(_op)
def _influxdb_query(query):
return json.loads(requests.post(f'https://{INFLUXDB_USER}:{INFLUXDB_PASSWORD}@{INFLUXDB_HOST}:{INFLUXDB_PORT}/query?db={INFLUXDB_DB_NAME}',
data='q=' + query, headers={'content-type': 'application/x-www-form-urlencoded'}).text)
def _influxdb_write(measurement, args):
query = ', '.join([','.join([f'{tag["name"]}={tag["value"]}' for tag in arg['tags']]) + f' value={arg["value"]}' for arg in args])
return requests.post(f'https://{INFLUXDB_USER}:{INFLUXDB_PASSWORD}@{INFLUXDB_HOST}:{INFLUXDB_PORT}/write?db={INFLUXDB_DB_NAME}',
data=f'{measurement},{query} {int(time.time() * 1e9)}',
headers={'content-type': 'application/x-www-form-urlencoded'}).text
def cassandra_insert(u_id, timestamp, filename, comment='comment'):
with open(filename, 'rb') as file:
print(CApi.db_execute("""INSERT INTO cycling.records (u_d,r_time,audio,comment)
VALUES(%s,%s,%s,%s)""",
(str(u_id), timestamp, file.read(), comment)))
def load_record(u_id, timestamp):
result_set = CApi.db_query("SELECT * FROM cycling.records where u_d=%s and r_time=%s ALLOW FILTERING;", (u_id, int(timestamp)))
for res in result_set: return res.audio
def create_tables():
_execute('''CREATE TABLE diary
(u_id INT NOT NULL,
r_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
script TEXT NOT NULL,
score REAL NOT NULL,
is_anomaly INT NOT NULL,
neutrality REAL NOT NULL,
happiness REAL NOT NULL,
sadness REAL NOT NULL,
anger REAL NOT NULL,
fear REAL NOT NULL);''')
_execute('''CREATE TABLE users
(u_id SERIAL,
name CHAR(30) NOT NULL,
age INT);''')
_execute('''CREATE TABLE avatars
(u_id INT NOT NULL,
name CHAR(50) NOT NULL,
level INT NOT NULL DEFAULT 0);''')
CApi.db_execute("""CREATE KEYSPACE cycling WITH REPLICATION = {
'class' : 'SimpleStrategy',
'replication_factor' : 1
};""")
CApi.db_execute("""CREATE TABLE records (
u_d text PRIMARY KEY,
r_time int,
audio blob,
comment text );""")
# debug
def drop_tables():
return
CApi.db_execute('DROP TABLE records;')
_execute('DROP TABLE diary, users, avatars;')
_influxdb_query('DROP MEASUREMENT mental_metrics')
def create_user(name, age=None, avatar_name=None):
if avatar_name is None:
avatar_name = f'{name}\'s avatar'
assert len(name) <= 30
assert len(avatar_name) <= 50
if age is not None:
res = _execute_fetch('''INSERT INTO users (name, age)
VALUES (%s, %s) RETURNING u_id;''', (name, age))
else:
res = _execute_fetch('''INSERT INTO users (name)
VALUES (%s) RETURNING u_id;''', (name,))
u_id = res[0][0]
_execute('''INSERT INTO avatars (u_id, name)
VALUES (%s, %s);''', (u_id, avatar_name))
return u_id
def to_timestamp(influxdb_date):
d, t = influxdb_date[:-1].split('T')
h, m, s = t.split(':')
s = int(s.split('.')[0])
h = int(h) + 3
return int(datetime.datetime(*list(map(int, d.split('-'))), h, int(m), s).strftime("%s"))
def to_azure(timestamp):
_date = datetime.date.fromtimestamp(timestamp)
return f'{_date.year}-{_date.month}-{_date.day}T12:00:00Z'
def make_daily_series(series):
s, n, last = 0, 0, None
res = []
for record in sorted(series, key=lambda _record: to_timestamp(_record[0])) + [(-1, -1)]:
timestamp, metric = record
if timestamp != -1:
date = int(datetime.date.fromtimestamp(to_timestamp(timestamp)).strftime("%s"))
else: date = -2
if date != last:
if last is not None:
s /= n
if len(res) > 0: mean = (s + res[-1][1]) / 2
while len(res) > 0 and date - res[-1][0] > 86400 * 2:
res.append([res[-1][0] + 86400, mean])
res.append([last, s])
last = date
n, s = 0, 0
s += metric
n += 1
for i in range(len(res)): res[i][0] = to_azure(res[i][0])
return res
def insert_record(u_id, data_file, date=int(time.time())):
if date is None: date=int(time.time())
text = MSSp.process(data_file)
score = MST.process(text)
metrics = get_emotions(data_file)
for type, value in zip(['neutrality', 'happiness', 'sadness', 'anger', 'fear', 'score'],
metrics + (score,)):
_influxdb_write('mental_metrics',
[{'tags': [
{'name': 'u_id', 'value': u_id},
{'name': 'type', 'value': type}],
'value': value}])
res = _influxdb_query('SELECT "time","value" FROM "mental_metrics" WHERE u_id=\'%s\' AND type=\'score\''%u_id)
try:
series = res['results'][0]['series'][0]['values']
series = make_daily_series(series)
if len(series) < 12: is_anomaly = 0
else: is_anomaly = MSAD.process(series)
except:
print(res)
raise
new_level = _execute_fetch('''UPDATE avatars SET level = level + 1 WHERE u_id = %s
RETURNING level;''', (u_id,))[0][0]
print(u_id, date)
cassandra_insert(u_id, date, data_file)
return _execute_fetch('''INSERT INTO diary (u_id, r_time, script, score, is_anomaly, neutrality, happiness, sadness, anger, fear)
VALUES (%s, to_timestamp(%s), %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING score, is_anomaly, neutrality, happiness, sadness, anger, fear;''',
(u_id, date, text, score, is_anomaly, *metrics))[0] + (int(new_level), text)
def get_records(u_id, date_from=None, date_to=None, phrase=None):
date_range = ''
if date_from is not None:
date_range += f" AND r_time >= to_timestamp('{date_from}', 'yyyy-mm-dd')"
if date_to is not None:
date_range += f" AND r_time < to_timestamp('{date_to}', 'yyyy-mm-dd')"
if phrase is not None:
return _execute_fetch(f"""SELECT r_time, script FROM diary
WHERE u_id = {u_id} {date_range} AND
data LIKE '%{phrase}%'""")
return _execute_fetch(f"""SELECT r_time, script FROM diary
WHERE u_id = {u_id} {date_range}""")
def get_audio(u_id, timestamp):
return load_record(u_id, timestamp)
repo_name: raid-7/SmartDiary | sub_path: backend/main.py
file_name: main.py | file_ext: py | file_size_in_byte: 9,127
program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
api:
[
{
"api_name": "vokaturi.Vokaturi.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "scipy.io.wavfile.io.wavfile.read",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.io",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.SampleArrayC",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.Voice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.Quality",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "vokaturi.Vokaturi.EmotionProbabilities",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "vokaturi.Vokaturi",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "psycopg2.connect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.date.fromtimestamp",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.fromtimestamp",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 227,
"usage_type": "call"
}
]

---

seq_id: 42896231712 | text:
import math
from functools import partial
from typing import Any, Callable
import jax
import jax.numpy as jnp
from chex import ArrayTree
from jax import tree_map, vmap
from jax.scipy.special import logsumexp
from ..resamplings import multinomial
STATE = Any
@partial(jax.jit, static_argnums=(2, 3, 4), donate_argnums=(0, 1))
def operator(inputs_a: STATE, inputs_b: STATE, log_weight_fn: Callable[[ArrayTree, ArrayTree, Any], float],
n_samples: int, last_step: bool):
"""
Operator corresponding to the stitching operation of the conditional dSMC algorithm.
Parameters
----------
inputs_a: STATE
A tuple of three arguments.
First one is the state of the partial dSMC smoother to the left of the current time step.
Second are the jax random keys used for resampling at the time steps to the left of the current time step.
Third are the parameters used to compute the mixing weights to the left of the current time step.
inputs_b: STATE
Same as `inputs_a` but to the right of the current time step
log_weight_fn: callable
Function that computes the un-normalised stitching N^2 weights, first argument is x_{t-1}, second is x_t, and
third is the parameters.
It will be automatically batched so only needs to be expressed elementwise
n_samples: int
Number of samples in the resampling
last_step: bool
Whether we are at the last time step or not. If so, we only need one trajectory.
Returns
-------
"""
# Unpack the states
state_a, keys_a, params_a = inputs_a
state_b, keys_b, params_b = inputs_b
trajectories_a, log_weights_a, origins_a = state_a
trajectories_b, log_weights_b, origins_b = state_b
weights = get_weights_batch(trajectories_a, log_weights_a,
trajectories_b, log_weights_b, params_b,
log_weight_fn)
if last_step:
# If last step
idx = jax.random.choice(keys_b[0], n_samples ** 2, p=jnp.ravel(weights))
l_idx, r_idx = jnp.unravel_index(idx, (n_samples, n_samples))
else:
idx = multinomial(keys_b[0], jnp.ravel(weights), n_samples)
l_idx, r_idx = jax.vmap(jnp.unravel_index, in_axes=[0, None])(idx, (n_samples, n_samples))
return _gather_results(l_idx, r_idx, n_samples,
trajectories_a, origins_a, log_weights_a, keys_a, params_a,
trajectories_b, origins_b, log_weights_b, keys_b, params_b)
def _gather_results(left_idx, right_idx, n_samples,
trajectories_a, origins_a, log_weights_a, keys_a, params_a,
trajectories_b, origins_b, log_weights_b, keys_b, params_b):
# If we are using conditional dSMC, we need to make sure to preserve the first trajectory.
# Resample the trajectories
trajectories_a = tree_map(lambda z: jnp.take(z, left_idx, 1), trajectories_a)
trajectories_b = tree_map(lambda z: jnp.take(z, right_idx, 1), trajectories_b)
# Keep track of the trajectories origins for analysis down the line (not used in the algo)
origins_a = jnp.take(origins_a, left_idx, 1)
origins_b = jnp.take(origins_b, right_idx, 1)
# Gather the results
keys = jnp.concatenate([keys_a, keys_b])
params = tree_map(lambda a, b: jnp.concatenate([a, b]), params_a, params_b)
origins = jnp.concatenate([origins_a, origins_b])
trajectories = tree_map(lambda a, b: jnp.concatenate([a, b]), trajectories_a, trajectories_b)
log_weights = jnp.concatenate([jnp.full_like(log_weights_a, -math.log(n_samples)),
jnp.full_like(log_weights_b, -math.log(n_samples))])
return (trajectories, log_weights, origins), keys, params
def get_weights_batch(trajectories_a, log_weights_a,
trajectories_b, log_weights_b, params_b,
log_weight_fn: Callable[[ArrayTree, ArrayTree, Any], float]):
# House keeping to get the required inputs.
params_t = tree_map(lambda z: z[0], params_b)
x_t_1 = tree_map(lambda z: z[-1], trajectories_a)
x_t = tree_map(lambda z: z[0], trajectories_b)
log_w_t_1 = log_weights_a[-1]
log_w_t = log_weights_b[0]
log_weights = get_log_weights(x_t_1, log_w_t_1,
x_t, log_w_t, params_t,
log_weight_fn)
ell_inc = logsumexp(log_weights)
weights = jnp.exp(log_weights - ell_inc)
return weights
def get_log_weights(x_t_1, log_w_t_1,
x_t, log_w_t, params_t,
log_weight_fn):
# House keeping to get the required inputs.
# This nested vmap allows to define log_weight_fn more easily at the API level. This is to create a
# (N,N) -> N^2 function while only having to care about elementwise formulas.
# if log_weight_fn = lambda a, b: u * v, then this corresponds to np.outer.
vmapped_log_weight_fn = vmap(vmap(log_weight_fn,
in_axes=[None, 0, None], out_axes=0),
in_axes=[0, None, None], out_axes=0)
log_weight_increment = vmapped_log_weight_fn(x_t_1, x_t, params_t) # shape = M, N
# Take the corresponding time step and reshape to allow for adding residual weights in parallel
log_weights = log_weight_increment + log_w_t_1[:, None] + log_w_t[None, :]
return log_weights
repo_name: AdrienCorenflos/aux-ssm-samplers | sub_path: aux_samplers/_primitives/csmc/pit/operator.py
file_name: operator.py | file_ext: py | file_size_in_byte: 5,444
program_lang: python | lang: en | doc_type: code | stars: 7 | dataset: github-code | pt: 6
api:
[
{
"api_name": "typing.Any",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "chex.ArrayTree",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "jax.random.choice",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "jax.random",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy.ravel",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "jax.numpy.unravel_index",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "resamplings.multinomial",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "jax.numpy.ravel",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "jax.vmap",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "jax.numpy.unravel_index",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jax.jit",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "jax.tree_map",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "jax.numpy.take",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jax.numpy.take",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "jax.numpy.take",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "jax.numpy.take",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "jax.numpy.concatenate",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "jax.numpy.full_like",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "jax.numpy.full_like",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "math.log",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "chex.ArrayTree",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "jax.tree_map",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "jax.tree_map",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "jax.tree_map",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "jax.scipy.special.logsumexp",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "jax.numpy.exp",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "jax.vmap",
"line_number": 119,
"usage_type": "call"
}
]

---

seq_id: 21247144104 | text:
from datetime import timedelta
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.sensors.filesystem import FileSensor
from directories import (
VOLUME_PATH, LAST_MODEL_DIR, PREDICTIONS_DIR,
RAW_DATA_DIR, PROCESSED_DATA_DIR, START_DATE,
PROD_DATE,
)
default_args = {
"owner": "airflow",
"email": ["[email protected]"],
"email_on_failure": True,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
with DAG(
"predict_daily",
default_args=default_args,
schedule_interval="@daily",
start_date=START_DATE,
) as dag:
start_task = DummyOperator(task_id="start-predict")
wait_data = FileSensor(
task_id="wait-data",
filepath="./data/raw/{{ ds }}/data.csv",
poke_interval=10,
retries=100,
)
wait_model = FileSensor(
task_id="wait-model",
filepath=f"./data/models/{PROD_DATE}/model.pkl",
poke_interval=10,
retries=100,
)
preprocess_cmd = (
f" --input-dir {RAW_DATA_DIR}"
f" --output-dir {PROCESSED_DATA_DIR}"
f" --mode eval"
)
preprocess = DockerOperator(
image="airflow-preprocess",
task_id="docker-preprocess",
command=preprocess_cmd,
network_mode="bridge",
do_xcom_push=False,
volumes=[f"{VOLUME_PATH}:/data"],
)
predict_cmd = (
f" --input-dir {PROCESSED_DATA_DIR}"
f" --model-dir {LAST_MODEL_DIR}"
f" --output-dir {PREDICTIONS_DIR}"
)
predict = DockerOperator(
image="airflow-predict",
task_id="docker-predict",
command=predict_cmd,
network_mode="bridge",
do_xcom_push=False,
volumes=[f"{VOLUME_PATH}:/data"],
)
end_task = DummyOperator(task_id="end-predict")
start_task >> [wait_data, wait_model] >> preprocess >> predict >> end_task
repo_name: made-ml-in-prod-2021/truengineer | sub_path: airflow_ml_dags/dags/predict_daily.py
file_name: predict_daily.py | file_ext: py | file_size_in_byte: 1,982
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "directories.START_DATE",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "airflow.operators.dummy.DummyOperator",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "airflow.sensors.filesystem.FileSensor",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "airflow.sensors.filesystem.FileSensor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "directories.PROD_DATE",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "directories.RAW_DATA_DIR",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "directories.PROCESSED_DATA_DIR",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "airflow.providers.docker.operators.docker.DockerOperator",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "directories.VOLUME_PATH",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "directories.PROCESSED_DATA_DIR",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "directories.LAST_MODEL_DIR",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "directories.PREDICTIONS_DIR",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "airflow.providers.docker.operators.docker.DockerOperator",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "directories.VOLUME_PATH",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "airflow.operators.dummy.DummyOperator",
"line_number": 76,
"usage_type": "call"
}
]

---

seq_id: 37598101859 | text:
import pytz
from sqlalchemy.orm import Session
import models
import schemas
def create_module_build(db: Session, build: schemas.ModuleBuild):
print(build)
db_build = models.ModuleBuild(
module=build.module,
build_time=build.build_time,
result=build.result,
finished_at=build.finished_at.astimezone(pytz.utc),
maven_opts=build.maven_opts,
uname=build.uname,
uuid=str(build.uuid),
cpu=build.cpu,
mem=build.mem,
)
db.add(db_build)
db.flush()
return db_build
repo_name: fresch/maven-build-tracker | sub_path: crud/CreateBuild.py
file_name: CreateBuild.py | file_ext: py | file_size_in_byte: 555
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "schemas.ModuleBuild",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.ModuleBuild",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 15,
"usage_type": "attribute"
}
]

---

seq_id: 17609874011 | text:
# encoding: utf-8
from django.core.management import BaseCommand
from issuer.models import BadgeClass
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--limit',
type=int,
help='Number of model instances to process in a batch',
default=1000
)
def handle(self, *args, **options):
model = BadgeClass
processed_count = 0
limit = options['limit']
queryset = model.objects.filter(image_hash='').exclude(image='')
processing = True
while processing:
active_set = queryset[0:limit]
self.stdout.write(str(active_set.query))
if active_set.exists():
for instance in active_set:
instance.save()
self.stdout.write("Calculated initial image_hash for {} #{}: {}".format(
instance.__class__.__name__, instance.pk, instance.image_hash)
)
processed_count += 1
else:
processing = False
self.stdout.write("Finished processing populate_image_hashes for model {}. {} records updated.".format(
model.__name__, processed_count)
)
repo_name: reedu-reengineering-education/badgr-server | sub_path: apps/issuer/management/commands/populate_image_hashes.py
file_name: populate_image_hashes.py | file_ext: py | file_size_in_byte: 1,279
program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
api:
[
{
"api_name": "django.core.management.BaseCommand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeClass",
"line_number": 17,
"usage_type": "name"
}
]

---

seq_id: 70818525948 | text:
import speech_recognition as sr
import multiprocessing as mp
import os
import time
def func(n):
print("Task {} convert successfully".format(n))
speechToText()
time.sleep(2) #simulate processing or server return time
print("Task {} has been done now.".format(n))
def speechToText():
r = sr.Recognizer()
# Reading Audio file as source
# listening the audio file and store in audio_text variable
# The path should be correct
with sr.AudioFile('Sample.wav') as source:
audio = r.listen(source)
# Using exception handling in case the api could not be acceessed successfully.
try:
# using google speech recognition
text = r.recognize_google(audio)
print(text)
except:
print('Could not access API, please run it again.')
if __name__ == '__main__':
nums_core = mp.cpu_count()
print("There are {} cores being used now.".format(nums_core))
pool = mp.Pool(nums_core) #use all available cores
for i in range(0, 16):
pool.apply_async(func, args=(i,))
pool.close()
pool.join()
repo_name: CHAODENG/Project4 | sub_path: project4.py
file_name: project4.py | file_ext: py | file_size_in_byte: 1,144
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "speech_recognition.AudioFile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 39,
"usage_type": "call"
}
]

---

seq_id: 40087266458 | text:
import os
from meteo_ist.models import meteo_data, range_data
from django.utils.dateparse import parse_date
def upload_db(data):
for i in range(0, len(data['datetime'])):
date = parse_date(data['datetime'][i]) # parse string do date format
pp = data['data']['pp'][i]
pres = data['data']['pres'][i]
rad = data['data']['rad'][i]
rh = data['data']['rh'][i]
temp = data['data']['temp'][i]
wd = data['data']['wd'][i]
wg = data['data']['wg'][i]
ws = data['data']['ws'][i]
b = meteo_data(date, pp, pres, rad, rh, temp, wd, wg, ws)
b.save()
repo_name: sandroferreira97/meteo_ist | sub_path: meteo_ist/services.py
file_name: services.py | file_ext: py | file_size_in_byte: 632
program_lang: python | lang: tr | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "django.utils.dateparse.parse_date",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "meteo_ist.models.meteo_data",
"line_number": 18,
"usage_type": "call"
}
]

---

seq_id: 74341979708 | text:
from collections import deque
count = int(input())
dataDeque = deque(list(range(1, count+1)))
while True:
if len(dataDeque) == 1:
print(dataDeque[0])
break
dataDeque.popleft()
dataDeque.append(dataDeque.popleft())
repo_name: KingPiggy/PS | sub_path: Baekjoon/큐, 덱/2164번 카드2.py
file_name: 2164번 카드2.py | file_ext: py | file_size_in_byte: 255
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
}
]

---

seq_id: 197603537 | text:
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
import aliens_functions as af
# Detect sprite collisions
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Respond to bullet-alien collisions."""
# Check whether any bullets have hit aliens
# If so, remove the corresponding bullets and aliens
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points * len(aliens)
sb.prep_score()
check_high_score(stats, sb)
# If all aliens have been destroyed, move up one difficulty level
start_new_level(ai_settings, screen, stats, sb, ship,
aliens, bullets)
def ship_hit(ai_settings, stats, sb, screen, ship, aliens, bullets):
"""Respond to the ship being hit by an alien."""
if stats.ships_left > 0:
# Decrement ships_left by 1
stats.ships_left -= 1
# Update the scoreboard
sb.prep_ships()
# Empty the lists of aliens and bullets
aliens.empty()
bullets.empty()
# Create a new fleet of aliens and center the ship at the bottom of the screen
af.create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Pause
sleep(0.5)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
# Other checks
def check_high_score(stats, sb):
"""Check whether a new high score has been set"""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
def start_new_level(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Raise the game difficulty level"""
if len(aliens) == 0:
# If the entire alien fleet has been destroyed, move up a level
bullets.empty()
ai_settings.increase_speed()
# Raise the level
stats.level += 1
sb.prep_level()
af.create_fleet(ai_settings, screen, ship, aliens)
repo_name: wanwan2qq/alien_invasion | sub_path: collisions_functions.py
file_name: collisions_functions.py | file_ext: py | file_size_in_byte: 2,042
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "aliens_functions.create_fleet",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "aliens_functions.create_fleet",
"line_number": 70,
"usage_type": "call"
}
]

---

seq_id: 16312390211 | text:
from typing import NamedTuple
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class LanguageDataset(NamedTuple):
records: tf.data.Dataset
vocab_size: int
def load(batch_size: int, sequence_length: int) -> LanguageDataset:
"""Load LM1B dataset, returning it and vocab_size."""
ds, ds_info = tfds.load(
'lm1b/subwords32k',
split=tfds.Split.TRAIN,
shuffle_files=True,
with_info=True)
crop_size = sequence_length + 1
ds = ds.repeat()
# Convert the dataset to constant-size int32 tensors.
ds = ds.map(lambda d: tf.cast(d['text'], tf.int32))
ds = ds.map(lambda t: _crop_or_pad(t, crop_size, pad_token=0))
ds = ds.shuffle(batch_size * 10)
# Create the language modeling observation/target pairs and batch them up.
ds = ds.map(lambda t: dict(obs=t[:-1], target=t[1:]))
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
ds = tfds.as_numpy(ds)
return LanguageDataset(ds, ds_info.features['text'].encoder.vocab_size)
def _crop_or_pad(value, size, pad_token):
"""Either crop or pad value to be of size size."""
val_size = tf.size(value)
pad = lambda: tf.pad(value, [[0, size - val_size]], 'CONSTANT', constant_values=pad_token)
return tf.cond(val_size < size, pad, lambda: value[:size])
repo_name: ChrisWaites/data-deletion | sub_path: src/adaptive_deletion/nlp/transformer/dataset.py
file_name: dataset.py | file_ext: py | file_size_in_byte: 1,313
program_lang: python | lang: en | doc_type: code | stars: 5 | dataset: github-code | pt: 6
api:
[
{
"api_name": "typing.NamedTuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.data",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tensorflow_datasets.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow_datasets.Split",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2.cast",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.int32",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2.data",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tensorflow_datasets.as_numpy",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2.size",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.pad",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v2.cond",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v2",
"line_number": 38,
"usage_type": "name"
}
]

---

seq_id: 43566450593 | text:
import requests
from pprint import pprint
import os
SHEET_ENDPOINT = "https://api.sheety.co/a65d37e4e4c4751b050905bbc69d2c13/myFlightDeals/prices"
HEADERS = {
"Authorization":os.environ.get("AUTH"),
"Content-Type":"application/json",
}
USR_ENDPOINT = os.environ.get("SHEET_ENd")
class DataManager:
#This class is responsible for talking to the Google Sheet.
def __init__(self):
self.response = requests.get(url=SHEET_ENDPOINT, headers=HEADERS)
self.response.raise_for_status()
def get_info(self):
data = self.response.json()["prices"]
return data
def update(self,row_id,iata):
changes = {
"price": {
"iataCode":iata,
}
}
edit = requests.put(url=f"{SHEET_ENDPOINT}/{row_id}",json=changes,headers=HEADERS)
edit.raise_for_status()
def get_emails(self):
mail_response = requests.get(url=USR_ENDPOINT,headers=HEADERS)
mail_response.raise_for_status()
mail_data = mail_response.json()["users"]
return mail_data
repo_name: HazorTremz/FlightDealFinder | sub_path: data_manager.py
file_name: data_manager.py | file_ext: py | file_size_in_byte: 1,077
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
}
]

---

seq_id: 44018209186 | text:
import numpy as np
from modAL.models import ActiveLearner
from modAL.multilabel import SVM_binary_minimum
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
n_samples = 500
X = np.random.normal(size=(n_samples, 2))
y = np.array([[int(x1 > 0), int(x2 > 0)] for x1, x2 in X])
n_initial = 10
initial_idx = np.random.choice(range(len(X)), size=n_initial, replace=False)
X_initial, y_initial = X[initial_idx], y[initial_idx]
X_pool, y_pool = np.delete(X, initial_idx, axis=0), np.delete(y, initial_idx, axis=0)
learner = ActiveLearner(
estimator=OneVsRestClassifier(LinearSVC()),
query_strategy=SVM_binary_minimum,
X_training=X_initial, y_training=y_initial
)
n_queries = 10
for idx in range(n_queries):
query_idx, query_inst = learner.query(X_pool)
learner.teach(X_pool[query_idx].reshape(1, -1), y_pool[query_idx].reshape(1, -1))
X_pool, y_pool = np.delete(X_pool, query_idx, axis=0), np.delete(y_pool, query_idx, axis=0)
repo_name: modAL-python/modAL | sub_path: tests/example_tests/multilabel_svm.py
file_name: multilabel_svm.py | file_ext: py | file_size_in_byte: 981
program_lang: python | lang: en | doc_type: code | stars: 2,058 | dataset: github-code | pt: 6
api:
[
{
"api_name": "numpy.random.normal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "modAL.models.ActiveLearner",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.multiclass.OneVsRestClassifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "modAL.multilabel.SVM_binary_minimum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.delete",
"line_number": 26,
"usage_type": "call"
}
]

---

seq_id: 20615061350 | text:
'''test conf'''
import os
import datetime
from tokenleaderclient.rbac import wfc
from flexflow.configs.config_handler import Configs
from flexflow.dbengines.sqlchemy.models import dbdriver
test_data_path = os.path.join(os.path.dirname(__file__),
os.pardir, 'tests', 'testdata')
test_client_conf_file = os.path.join(test_data_path, 'test_client_configs.yml')
conf_file= os.path.join(test_data_path,'test_flexflow_configs.yml')
print(test_client_conf_file, conf_file)
# must_have_keys_in_yml = {'host_name',
# 'host_port',
# 'ssl',
# 'ssl_settings',
# 'database',
# 'secrets'
# 'celery'
# }
must_have_keys_in_yml = {}
testconf = Configs('flexflow', conf_file=conf_file, must_have_keys_in_yml=must_have_keys_in_yml)
yml = testconf.yml
con_string = dbdriver.get_connection_settings(testconf)
print('con_string', con_string)
test_db_conf = { 'SQLALCHEMY_DATABASE_URI': con_string,
'SQLALCHEMY_TRACK_MODIFICATIONS': False }
testwfc= wfc.WorkFuncContext()
testwfc.username = 'user1'
testwfc.org = 'ITC'
testwfc.orgunit = 'ou1'
testwfc.department = 'dept1'
testwfc.roles = ['role1', ]
testwfc.name = 'wfc1'
testwfc.email = '[email protected]'
testwfc.request_id = 'hhihihhh-890809-jklkk;k-ytfty'
testwfc.time_stamp = datetime.datetime.utcnow()
testwfc.client_address = '10.10.10.10'
tspwfc= wfc.WorkFuncContext()
tspwfc.username = 'TSP1user1'
tspwfc.org = 'TSP1'
tspwfc.orgunit = 'TSP1ou1'
tspwfc.department = 'TSP1dept1'
tspwfc.roles = ['role1', ]
tspwfc.name = 'TSP1wfc1'
tspwfc.email = '[email protected]'
tspwfc.request_id = 'TSP1ihhh-890809-jklkk;k-ytfty'
tspwfc.time_stamp = datetime.datetime.utcnow()
tspwfc.client_address = '10.10.10.10'
ITSSwfc= wfc.WorkFuncContext()
ITSSwfc.username = 'ITSSuser1'
ITSSwfc.org = 'ITC'
ITSSwfc.orgunit = 'ITSS'
ITSSwfc.department = 'ITSSept1'
ITSSwfc.roles = ['role1', ]
ITSSwfc.name = 'ITSSSwfc1'
ITSSwfc.email = '[email protected]'
ITSSwfc.request_id = 'ITSSihhh-890809-jklkk;k-ytfty'
ITSSwfc.time_stamp = datetime.datetime.utcnow()
ITSSwfc.client_address = '10.10.10.10'
MISwfc= wfc.WorkFuncContext()
MISwfc.username = 'MISuser1'
MISwfc.org = 'ITC'
MISwfc.orgunit = 'MIS1'
MISwfc.department = 'MISept1'
MISwfc.roles = ['role1', ]
MISwfc.name = 'MISwfc1'
MISwfc.email = '[email protected]'
MISwfc.request_id = 'MISihhh-890809-jklkk;k-ytfty'
MISwfc.time_stamp = datetime.datetime.utcnow()
MISwfc.client_address = '10.10.10.10'
repo_name: BhujayKumarBhatta/flexflow | sub_path: flexflow/configs/testconf.py
file_name: testconf.py | file_ext: py | file_size_in_byte: 2,682
program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
api:
[
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flexflow.configs.config_handler.Configs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flexflow.dbengines.sqlchemy.models.dbdriver.get_connection_settings",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flexflow.dbengines.sqlchemy.models.dbdriver",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "tokenleaderclient.rbac.wfc.WorkFuncContext",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tokenleaderclient.rbac.wfc",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 76,
"usage_type": "attribute"
}
]

---

seq_id: 17215982737 | text:
# coding: utf-8
import cv2
import dlib
import sys
import face_recognition
import numpy as np
import os
def getdemo(face_file_path):
# Load the face detection model
print("Currently processing image:", face_file_path)
predicter_path ='shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
# Load the facial landmark detection model
sp = dlib.shape_predictor(predicter_path)
# Read in the image
bgr_img=cv2.imdecode(np.fromfile(face_file_path,dtype=np.int8),-1)
# bgr_img = cv2.imread(face_file_path)
if bgr_img is None:
print("Sorry, we could not load '{}' as an image".format(face_file_path))
return
# OpenCV uses the BGR color space; convert to RGB before using with dlib
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
# bgr_img = cv2.imread(face_file_path)
if(rgb_img.shape[0]<2000):
scale = 3000.0/rgb_img.shape[1]
rgb_img = cv2.resize(rgb_img,(3000,int(rgb_img.shape[0]/(rgb_img.shape[1])*3000)))
# OpenCV uses the BGR color space; convert to RGB before using with dlib
# rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
# Detect faces in the image
dets = detector(rgb_img, 1)
# (top, right, bottom, left) 803 982 892 892
# (left,top, right, bottom) 892 803 982 892
# Number of faces detected
faceNum = len(dets)
print(faceNum)
if faceNum == 0:
print("Sorry, there were no faces found in '{}'".format(face_file_path))
return
face_locations = []
for det in dets:
face_locations.append((det.top(),det.right(),det.bottom(),det.left()))
faceDic = {}
faceDic['faceNum'] = faceNum
face_landmarks = face_recognition.face_landmarks(rgb_img,face_locations) # 72 landmark points
face_encodings = face_recognition.face_encodings(rgb_img,face_locations)
# Detect the facial landmark points and save them
faces = dlib.full_object_detections()
for det in dets:
faces.append(sp(rgb_img, det))
# Face alignment
images = dlib.get_face_chips(rgb_img, faces, size=320)
# Display counter; output windows/files are created according to this count
image_cnt = 0
# Show the alignment results
for image in images:
image_cnt += 1
cv_rgb_image = np.array(image).astype(np.uint8)  # convert to a numpy array first
cv_bgr_image = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)  # OpenCV uses the BGR color space, so convert from RGB to BGR
print("Saving image: " + str(image_cnt)+'.jpg')
cv2.imwrite('./'+str(image_cnt)+'.jpg',cv_bgr_image)
# face_file_path = 'D:/py/My_work/6_27_facebook/mtcnn-keras-master/img1/M/静.jpg'  # image to use; place it in the current folder
# face_file_path = '../face/d/静.jpg'  # image to use; place it in the current folder
face_file_path = '../face/9.jpg'  # image to use; place it in the current folder
getdemo(face_file_path)
print("Finished writing..")
repo_name: u19900101/ImgManage | sub_path: pythonModule/python/saveFace.py
file_name: saveFace.py | file_ext: py | file_size_in_byte: 2,892
program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
api:
[
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dlib.shape_predictor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imdecode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_landmarks",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_encodings",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "dlib.full_object_detections",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dlib.get_face_chips",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 67,
"usage_type": "call"
}
]

---

seq_id: 70777898428 | text:
import torch
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from torch import optim, nn
from DQN import DQN
import torch.nn.functional as F
class Agent:
def __init__(self, input_size, output_size, device='cpu', learning_rate= 0.001, gamma=0.99, epsilon=0.6, epsilon_min=0.01, epsilon_decay=0.9995,batch_size=32,memory_size=100):
self.device = device
self.output_size = output_size
self.policy_net = DQN(input_size, output_size).to(device)
self.target_net = DQN(input_size, output_size).to(device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=learning_rate, weight_decay=0.0001)
self.memory = []
self.batch_size = batch_size
self.memory_size = memory_size
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.lossfn = nn.MSELoss()
self.history = {'loss':[]}
def make_action(self, state):
if np.random.rand() < self.epsilon:
return np.random.choice(self.output_size)
with torch.no_grad():
state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
q_values = self.policy_net(state_tensor)
best_action, best_action_index = torch.max(q_values[0], 1)
action = best_action_index.item()
return action
def make_eval_action(self,state):
with torch.no_grad():
state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
q_values = self.policy_net(state_tensor)
best_action, best_action_index = torch.max(q_values[0], 1)
action = best_action_index.item()
return action
def add_experience(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
if len(self.memory) > self.memory_size:
self.memory.pop(0)
def split_batch(self,batch):
states = []
actions = []
rewards = []
next_states = []
dones = []
for experience in batch:
states.append(self.memory[experience][0])
actions.append(self.memory[experience][1])
rewards.append(self.memory[experience][2])
next_states.append(self.memory[experience][3])
dones.append(self.memory[experience][4])
return np.array(states),np.array(actions),np.array(rewards),np.array(next_states),np.array(dones)
def update_policy(self):
self.policy_net.train()
if len(self.memory) < self.batch_size:
return
batch = np.random.choice(len(self.memory), self.batch_size, replace=False)
states,actions,rewards,next_states,dones = self.split_batch(batch)
state_batch = torch.FloatTensor(states).to(self.device)
action_batch = torch.LongTensor(actions).to(self.device)
reward_batch = torch.FloatTensor(rewards).to(self.device)
next_state_batch = torch.FloatTensor(next_states).to(self.device)
done_batch = torch.FloatTensor(dones).to(self.device)
q_values = self.policy_net(state_batch).squeeze(1).gather(1, action_batch.unsqueeze(1)).squeeze(1)
with torch.no_grad():
next_q_values = self.target_net(next_state_batch).squeeze(1)
next_q_values,_ = torch.max(next_q_values,1)
expected_q_values = (next_q_values * self.gamma) * (1 - done_batch) + reward_batch
loss = self.lossfn(q_values, expected_q_values)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
self.history['loss'].append(loss.item())
def update_target(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def store_transition(self, transition):
self.memory.append(transition)
def __len__(self):
return len(self.memory)
def save_model(self):
self.policy_net.save_model()
def update_target_model(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def set_mode_eval(self):
self.policy_net.eval()
def set_mode_train(self):
self.policy_net.train()
repo_name: stefanos50/DQN-Trading-Agent | sub_path: Agent.py
file_name: Agent.py | file_ext: py | file_size_in_byte: 4,449
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "DQN.DQN",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.random.rand",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 82,
"usage_type": "call"
}
]

---

seq_id: 33208629801 | text:
from django.shortcuts import render
from django.views.generic.base import View
from .models import course
from pure_pagination import Paginator, PageNotAnInteger, EmptyPage
# Create your views here.
class CourseListView(View):
def get(self, request):
all_course = course.objects.all()
fav_course = all_course.order_by('-fav_numbers')[:2]
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# sort page
sort = request.GET.get('sort', '')
if sort:
if sort == 'hot':
all_course = all_course.order_by('-fav_numbers')
print('all orgs by students', all_course)
if sort == 'students':
all_course = all_course.order_by('-students')
else:
all_course = all_course.order_by('-add_time')
# page seprate
p = Paginator(all_course, 5, request=request)
courses_by_page = p.page(page)
course_number = all_course.count()
return render(request, 'course-list.html', {
'all_course': courses_by_page,
'fav_course': fav_course,
'course_number': course_number,
'sort': sort
})
def post(self, request):
pass
class CourseDetailView(View):
def get(self, request, course_id):
course_info = course.objects.filter(id=course_id)[0]
tag = course_info.tag
print('what tag is ', tag)
course_org = course_info.courseOrg
teacher_num = course_org.teacher_set.all().count()
all_related_courses = course.objects.filter(tag=tag)[:1]
print('all related courses %s' % all_related_courses)
return render(request, 'course-detail.html', {
'course_info': course_info,
'course_org': course_org,
'teacher_num': teacher_num,
'related_courses': all_related_courses
})
repo_name: LittleBirdLiu/MXonline_Task | sub_path: apps/course/views.py
file_name: views.py | file_ext: py | file_size_in_byte: 1,955
program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
api:
[
{
"api_name": "django.views.generic.base.View",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.course.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pure_pagination.PageNotAnInteger",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pure_pagination.Paginator",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.course.objects.filter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.course.objects.filter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "models.course.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "models.course",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 53,
"usage_type": "call"
}
] |
18842937658
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import random
from collections import deque
import gym
from gym.envs.registration import register
import math
import DQN as dqn
register(
id='CartPole-v1565',
entry_point='gym.envs.classic_control:CartPoleEnv',
# 'wrapper_config.TimeLimit.max_episode_steps' limits maximum step
tags={'wrapper_config.TimeLimit.max_episode_steps': 10001},
reward_threshold=-100
)
env = gym.make('CartPole-v1565')
# input_size = 4, output_size = 2
input_size = env.observation_space.shape[0]
output_size = env.action_space.n
dis = 0.9
REPLAY_MEMORY = 50000
epsilon = 1.0
epsilon_min = 0.01
epsilon_decay = 0.995
def replay_train(mainDQN, targetDQN, train_batch):
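    # Build (state, target-Q) training pairs from the sampled minibatch and fit the main network on them.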
x_stack = np.empty(0).reshape(0, input_size)
y_stack = np.empty(0).reshape(0, output_size)
for state, action, reward, next_state, done in train_batch:
Q = mainDQN.predict(state)
if done:
Q[0, action] = reward
else:
Q[0, action] = reward + dis * np.max(targetDQN.predict(next_state))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state])
return mainDQN.update(x_stack, y_stack)
def get_copy_var_ops(*, dest_scope_name="target", src_scope_name="main"):
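    # Return TF assign ops that copy the main network's trainable variables into the target network.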
op_holder = []
src_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
dest_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
op_holder.append(dest_var.assign(src_var.value()))
return op_holder
def get_epsilon(t):
return max(epsilon_min, min(epsilon, 1.0 - math.log10((t+1) * epsilon_decay)))
def bot_play(mainDQN):
s = env.reset()
reward_sum = 0
done = False
while not done:
env.render()
a = np.argmax(mainDQN.predict(s))
s, reward, done, _ = env.step(a)
reward_sum += reward
print("Total score: {}".format(reward_sum))
def main():
max_episodes = 5000
replay_buffer = deque()
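    # Replay buffer of (state, action, reward, next_state, done) transitions, capped at REPLAY_MEMORY.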
epsilon = 1.0
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, input_size, output_size, name="main")
targetDQN = dqn.DQN(sess, input_size, output_size, name="target")
tf.global_variables_initializer().run()
copy_ops = get_copy_var_ops(dest_scope_name="target", src_scope_name="main")
sess.run(copy_ops)
steps = []
for episode in range(max_episodes):
e = get_epsilon(episode)
done = False
step_count = 0
state = env.reset()
while not done:
if np.random.rand(1) < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_state, reward, done, _ = env.step(action)
replay_buffer.append((state, action, reward, next_state, done))
if len(replay_buffer) > REPLAY_MEMORY:
                    # popleft: discard the oldest transition once the buffer is full
replay_buffer.popleft()
state = next_state
step_count += 1
if epsilon > epsilon_min:
epsilon *= epsilon_decay
if step_count > 10000:
break
print("Episode: {} steps: {}".format(episode, step_count))
steps.append(step_count)
# if recent 10 episodes' steps mean > 300, break -> success
if len(steps) > 10:
steps.pop(0)
if np.mean(steps, axis=0) > 300:
break
if episode % 10 == 1:
for _ in range(50):
minibatch = random.sample(replay_buffer, 10)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
print("Loss: ", loss)
sess.run(copy_ops)
mainDQN.save()
targetDQN.save()
for _ in range(10):
bot_play(mainDQN)
if __name__ == "__main__":
main()
|
craclog/DQN_Cartpole
|
DQN_Cartpole.py
|
DQN_Cartpole.py
|
py
| 4,122 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "gym.envs.registration.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "math.log10",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "DQN.DQN",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 129,
"usage_type": "call"
}
] |
30918805074
|
"""
Template for generic Benchmark Test Case Workflow
"""
import sys
import json
import copy
from datetime import datetime
import pandas as pd
def build_iterator(**kwargs):
"""
For building the iterator of the benchmark
"""
iterator = [(2,'dummy'), (2, 'dummy2'), (4, 'dummy'), (2, 'dummy4')]
return iterator
def run_code(iterator_step, repetitions, stage_bench, **kwargs):
"""
For configuration and execution of the benchmark kernel.
Parameters
----------
iterator_step : tuple
tuple with elements from iterator built from build_iterator.
repetitions : list
number of repetitions for each execution
stage_bench : str
        benchmark stage. Only: benchmark, pre-benchmark
kwargs : keyword arguments
for configuration of the benchmark kernel
Returns
_______
metrics : pandas DataFrame
        DataFrame with the desired metrics obtained from the benchmark execution
save_name : string
Desired name for saving the results of the execution
"""
# if n_qbits is None:
# raise ValueError("n_qbits CAN NOT BE None")
if stage_bench not in ['benchmark', 'pre-benchmark']:
raise ValueError(
"Valid values for stage_bench: benchmark or pre-benchmark'")
if repetitions is None:
raise ValueError("samples CAN NOT BE None")
    # Here goes the code for configuring and executing the benchmark kernel
variable_0 = iterator_step[0]
variable_1 = iterator_step[1]
# Loop over the BTC step repetitions
for i in range(repetitions):
continue
metrics = pd.DataFrame()
# Deals with the file names for storing metrics
if stage_bench == 'pre-benchmark':
# Name for storing Pre-Benchmark results
save_name = "pre_benchmark.csv"
if stage_bench == 'benchmark':
# Name for storing Benchmark results
save_name = kwargs.get('csv_results')
#save_name = "pre_benchmark_step_{}.csv".format(n_qbits)
return metrics, save_name
def compute_samples(**kwargs):
"""
    This function computes the number of executions of the benchmark
    needed to assure an error r with a confidence of alpha
Parameters
----------
kwargs : keyword arguments
For configuring the sampling computation
Returns
_______
    samples : list
        number of executions for each benchmark step
"""
#Configuration for sampling computations
#Desired Confidence level
alpha = kwargs.get("alpha", 0.05)
#Code for computing the number of samples for getting the desired
    # statistical significance. Depends on the benchmark kernel
samples_ = pd.Series([100, 100])
samples_.name = "samples"
    # If the user wants to limit the number of samples
#Minimum and Maximum number of samples
min_meas = kwargs.get("min_meas", 5)
max_meas = kwargs.get("max_meas", None)
samples_.clip(upper=max_meas, lower=min_meas, inplace=True)
return list(samples_)
def summarize_results(**kwargs):
"""
Create summary with statistics
"""
folder = kwargs.get("saving_folder")
csv_results = kwargs.get("csv_results")
    # Code for summarizing the benchmark results. Depends on the
    # kernel of the benchmark
results = pd.DataFrame()
return results
class KERNEL_BENCHMARK:
"""
    Class for executing a Kernel benchmark
"""
def __init__(self, **kwargs):
"""
Method for initializing the class
"""
        # Configuration of the benchmarked algorithm or routine
self.kwargs = kwargs
#Benchmark Configuration
#Repetitions for pre benchmark step
self.pre_samples = self.kwargs.get("pre_samples", 10)
#Saving pre benchmark step results
self.pre_save = self.kwargs.get("pre_save", True)
#For executing or not the benchmark step
self.pre_benchmark = self.kwargs.get("pre_benchmark", True)
#Name for saving the pre benchmark step results
self.save_name = self.kwargs.get("save_name", None)
        # Number of qubits
self.list_of_qbits = self.kwargs.get("list_of_qbits", [4])
save_type = self.kwargs.get("save_append", True)
if save_type:
self.save_type = 'a'
else:
self.save_type = 'w'
#Create the iterator
self.iterator = build_iterator(**self.kwargs)
#Configure names for CSV files
self.saving_folder = self.kwargs.get("saving_folder")
self.benchmark_times = self.saving_folder + \
self.kwargs.get("benchmark_times")
self.csv_results = self.saving_folder + \
self.kwargs.get("csv_results")
self.summary_results = self.saving_folder + \
self.kwargs.get("summary_results")
#Attributes for metrics
self.pre_metrics = None
self.metrics = None
def save(self, save, save_name, input_pdf, save_mode):
"""
        For saving pandas DataFrames to CSV files
Parameters
----------
save: bool
For saving or not
        save_name: str
name for file
input_pdf: pandas DataFrame
save_mode: str
saving mode: overwrite (w) or append (a)
"""
if save:
with open(save_name, save_mode) as f_pointer:
input_pdf.to_csv(
f_pointer,
mode=save_mode,
header=f_pointer.tell() == 0,
sep=';'
)
def exe(self):
"""
Execute complete Benchmark WorkFlow
"""
start_time = datetime.now().astimezone().isoformat()
for step_iterator in self.iterator:
#print("n_qbits: {}".format(n_qbits))
if self.pre_benchmark:
print("\t Executing Pre-Benchmark")
#Pre benchmark step
pre_metrics, pre_save_name = run_code(
step_iterator, self.pre_samples, 'pre-benchmark',
**self.kwargs
)
#For saving pre-benchmark step results
pre_save_name = self.saving_folder + pre_save_name
self.save(self.pre_save, pre_save_name, pre_metrics, "w")
#Using pre benchmark results for computing the number of
#repetitions
self.kwargs.update({"pre_metrics": pre_metrics})
#Compute needed samples for desired
#statistical significance
samples_ = compute_samples(**self.kwargs)
print("\t Executing Benchmark Step")
print("\t step samples: {}".format(samples_))
metrics, save_name = run_code(
step_iterator, samples_, 'benchmark', **self.kwargs
)
save_name = self.saving_folder + save_name
self.save(self.save, save_name, metrics, self.save_type)
end_time = datetime.now().astimezone().isoformat()
pdf_times = pd.DataFrame(
[start_time, end_time],
index=["StartTime", "EndTime"]
).T
#Saving Time Info
pdf_times.to_csv(self.benchmark_times)
#Summarize Results
results = summarize_results(**self.kwargs)
results.to_csv(self.summary_results)
if __name__ == "__main__":
import os
import shutil
benchmark_arguments = {
        # Pre-benchmark stuff
"pre_benchmark": False,
"pre_samples": 10,
"pre_save": True,
#Saving stuff
"saving_folder": "./Results/",
"benchmark_times": "kernel_times_benchmark.csv",
"csv_results": "kernel_benchmark.csv",
"summary_results": "kernel_SummaryResults.csv",
#Computing Repetitions stuff
"alpha": 0.05,
"min_meas": 5,
"max_meas": 10,
#List number of qubits tested
"list_of_qbits": [4],#, 6, 8],
}
kernel_bench = KERNEL_BENCHMARK(**benchmark_arguments)
kernel_bench.exe()
|
NEASQC/WP3_Benchmark
|
tnbs/templates/my_benchmark_execution.py
|
my_benchmark_execution.py
|
py
| 8,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 239,
"usage_type": "call"
}
] |
5792679797
|
import json
import os
import magic
from io import BytesIO
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from django.db import models
from django.db.models.fields.related import ForeignObjectRel
from ..fields import JSONField
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from hvad.models import TranslatableModel, TranslatedFields
from PIL import Image
class BaseMediaFolder(TranslatableModel):
translations = TranslatedFields(
description=models.CharField(max_length=200, blank=True, null=True),
title=models.CharField(max_length=200, blank=True, null=True),
)
slug = models.SlugField()
creation_date = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
icon = models.ForeignKey(
"camomilla.Media",
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=_("Image cover"),
)
path = models.TextField(blank=True, null=True)
updir = models.ForeignKey(
"self",
on_delete=models.CASCADE,
related_name="child_folders",
null=True,
blank=True,
)
class Meta:
abstract = True
def update_childs(self):
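        # Re-save every child folder so its cached path is rebuilt from this folder's updated path.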
for folder in self.child_folders.all():
folder.save()
def save(self, *args, **kwargs):
if self.updir:
if self.updir.id == self.id:
raise ValidationError({"updir": "Unvalid parent"})
self.path = "{0}/{1}".format(self.updir.path, self.slug)
else:
self.path = "/{0}".format(self.slug)
super(BaseMediaFolder, self).save(*args, **kwargs)
self.update_childs()
def __str__(self):
to_string = self.slug
if self.title:
to_string += " - " + self.title
return to_string
class MediaFolder(BaseMediaFolder):
translations = TranslatedFields()
class Media(TranslatableModel):
translations = TranslatedFields(
alt_text=models.CharField(max_length=200, blank=True, null=True),
title=models.CharField(max_length=200, blank=True, null=True),
description=models.TextField(blank=True, null=True),
)
file = models.FileField()
thumbnail = models.ImageField(
upload_to=getattr(settings, "THUMB_FOLDER", "thumbnails"),
max_length=500,
null=True,
blank=True,
)
created = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=200, blank=True, null=True)
size = models.IntegerField(default=0, blank=True, null=True)
mime_type = models.CharField(max_length=128, blank=True, null=True)
image_props = JSONField(default=dict, blank=True)
folder = models.ForeignKey(
MediaFolder,
null=True,
blank=True,
related_name="media_folder",
on_delete=models.CASCADE,
)
@property
def path(self):
return "%s/%s" % (self.folder.path, self.name)
@property
def is_image(self):
return self.mime_type and self.mime_type.startswith("image")
def image_preview(self):
if self.file:
return mark_safe('<img src="{0}" />'.format(self.file.url))
def image_thumb_preview(self):
if self.thumbnail:
return mark_safe('<img src="{0}" />'.format(self.thumbnail.url))
image_preview.short_description = _("Preview")
image_thumb_preview.short_description = _("Thumbnail")
class Meta:
ordering = ["-pk"]
def regenerate_thumbnail(self):
if self.file:
self._make_thumbnail()
def get_foreign_fields(self):
return [
field.get_accessor_name()
for field in self._meta.get_fields()
if issubclass(type(field), ForeignObjectRel)
]
@property
def json_repr(self):
json_r = {
"id": self.pk,
"thumbnail": "" if not self.is_image else self.thumbnail.url,
"label": self.__str__(),
}
return json.dumps(json_r)
def _make_thumbnail(self):
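        # Detect the MIME type, record basic image properties and save a downscaled PNG into the thumbnail field.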
try:
fh = storage.open(self.file.name, "rb")
self.mime_type = magic.from_buffer(fh.read(2048), mime=True)
except FileNotFoundError as ex:
print(ex)
self.image_props = {}
self.mime_type = ""
return False
try:
orig_image = Image.open(fh)
image = orig_image.copy()
self.image_props = {
"width": orig_image.width,
"height": orig_image.height,
"format": orig_image.format,
"mode": orig_image.mode,
}
except Exception as ex:
print(ex)
return False
try:
image.thumbnail(
(
getattr(settings, "CAMOMILLA_THUMBNAIL_WIDTH", 50),
getattr(settings, "CAMOMILLA_THUMBNAIL_HEIGHT", 50),
),
Image.ANTIALIAS,
)
fh.close()
# Path to save to, name, and extension
thumb_name, thumb_extension = os.path.splitext(self.file.name)
thumb_extension = thumb_extension.lower()
thumb_filename = thumb_name + "_thumb" + thumb_extension
temp_thumb = BytesIO()
image.save(temp_thumb, "PNG", optimize=True)
temp_thumb.seek(0)
# Load a ContentFile into the thumbnail field so it gets saved
self.thumbnail.save(
thumb_filename, ContentFile(temp_thumb.read()), save=False
)
temp_thumb.close()
except Exception:
return False
return True
def _remove_file(self):
if self.file:
file_to_remove = os.path.join(settings.MEDIA_ROOT, self.file.name)
if os.path.isfile(file_to_remove):
os.remove(file_to_remove)
def _remove_thumbnail(self):
if self.thumbnail:
file_to_remove = os.path.join(settings.MEDIA_ROOT, self.thumbnail.name)
if os.path.isfile(file_to_remove):
os.remove(file_to_remove)
def _get_file_size(self):
if self.file:
file_to_calc = os.path.join(settings.MEDIA_ROOT, self.file.name)
if os.path.isfile(file_to_calc):
return self.file.size
else:
return 0
def __str__(self):
if self.name:
return self.name
return self.file.name
@receiver(post_save, sender=Media, dispatch_uid="make thumbnails")
def update_media(sender, instance, **kwargs):
instance._remove_thumbnail()
instance._make_thumbnail()
Media.objects.filter(pk=instance.pk).update(
size=instance._get_file_size(),
thumbnail=instance.thumbnail,
mime_type=instance.mime_type,
image_props=instance.image_props,
)
@receiver(pre_delete, sender=Media, dispatch_uid="make thumbnails")
def delete_media_files(sender, instance, **kwargs):
instance._remove_thumbnail()
instance._remove_file()
|
lotrekagency/camomilla
|
camomilla/models/media.py
|
media.py
|
py
| 7,378 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "hvad.models.TranslatableModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "hvad.models.TranslatableModel",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "hvad.models.TranslatedFields",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.models.FileField",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "fields.JSONField",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "django.db.models.fields.related.ForeignObjectRel",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "json.dumps",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.open",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "magic.from_buffer",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 168,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.core.files.base.ContentFile",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 221,
"usage_type": "argument"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_delete",
"line_number": 233,
"usage_type": "argument"
}
] |
31180641489
|
import dash
import math
from flask import Markup
from flask import render_template
import matplotlib.pyplot as plt
from flask import Flask, jsonify, request
from dash.dependencies import Output, Event, Input
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
import plotly.graph_objs as go
from collections import deque
import sqlite3
import pandas as pd
app=Flask(__name__)
@app.route('/alltrends',methods=['GET'])
def all_list():
print("inter")
conn = sqlite3.connect('twitter4.db')
c = conn.cursor()
df = pd.read_sql("SELECT * FROM world_trend_data", conn)
out = df.to_json(orient='records')[1:-1].replace('},{', '} {')
print("compl")
print(out)
return out
@app.route('/<string:name>',methods=['GET'])
def bar(name):
conn=sqlite3.connect('twitter4.db')
c=conn.cursor()
pf=pd.read_sql("SELECT name,tweet_volume FROM "+name,conn)
name=list(pf.name)
tweet_volume=list(pf.tweet_volume)
labels=[]
values=[]
#print(tweet_volume)
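    # Keep only trends that report a tweet volume (skip NaN entries).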
for x in range(0,len(name)):
if math.isnan(tweet_volume[x])==False:
labels.append(name[x])
values.append(tweet_volume[x])
print(labels)
print(values)
#labels = ["January","February","March","April","May","June","July","August"]
#values = [10,9,8,7,6,4,7,8]
return render_template('bar.html', values=values, labels=labels)
@app.route('/place/<string:name>',methods=['GET'])
def all_data(name):
conn=sqlite3.connect('twitter4.db')
c=conn.cursor()
df=pd.read_sql("SELECT name,query,tweet_volume FROM "+name,conn)
out = df.to_json(orient='records')[1:-1].replace('},{', '} {')
return out
if __name__=='__main__':
app.run(debug=True,port=8080)
|
ravirajsingh-knit/real-time-twitter-sentiment-analysis
|
main task/api2.py
|
api2.py
|
py
| 1,656 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 54,
"usage_type": "call"
}
] |
22426413086
|
from flask import Flask, request, jsonify
import requests
import json
import os
import feedparser
from dotenv import load_dotenv
import random
from datetime import date
load_dotenv()
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def root_post():
print(request)
return jsonify(text="リクエスト成功")
@app.route("/listening", methods=["GET", "POST"])
def hear():
# https://werkzeug.palletsprojects.com/en/0.15.x/wrappers/#werkzeug.wrappers.json.JSONMixin.get_json
request_obj = request.get_json()
if request_obj is None:
return jsonify({})
if "challenge" in request_obj:
challenge_token = request_obj["challenge"]
return jsonify(challenge=challenge_token)
if "event" in request_obj:
event = request_obj["event"]
print(event)
bot_id = event.get("bot_id")
if bot_id == "B010WR2FE2C":
return jsonify({})
endpoint = os.environ["SLACK_WEBHOOK"]
if "text" not in request_obj["event"]:
return jsonify({})
text = request_obj["event"]["text"]
if "<@U010KB4S65R>" not in text:
            # ignore messages that do not mention the bot
return jsonify({})
if "占い" in text or "うらない" in text or "うらなって" in text or "占って":
request_date = date.today().isoformat().replace("-", "/")
fortune_endpoint = (
f"http://api.jugemkey.jp/api/horoscope/free/{request_date}"
)
fortune_res = requests.get(fortune_endpoint)
fortune = fortune_res.json()
if fortune_res.status_code != 200 or fortune is None:
payload = {"text": f"<@{user}> 占いに失敗しました"}
requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
# fortune["horoscope"]["2020/03/28"] をするコード
sign_list = fortune.get("horoscope", {}).get(request_date, {})
if sign_list == {}:
payload = {"text": f"<@{user}> 占いに失敗しました"}
requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
messages = [sign["sign"] + ": " + sign["content"] for sign in sign_list]
messages.append("powerd by JugemKey 【PR】原宿占い館 塔里木")
payload = {"text": "\n".join(messages)}
res = requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
user = event["user"]
RSS_URL = "https://b.hatena.ne.jp/hotentry/it.rss"
d = feedparser.parse(RSS_URL)
text = "\n".join(
[f"{entry.title}: {entry.link}" for entry in random.sample(d.entries, 3)]
)
payload = {"text": f"<@{user}> {text}"}
res = requests.post(endpoint, data=json.dumps(payload))
return jsonify({})
|
tamanobi/benri-slackbot
|
index.py
|
index.py
|
py
| 2,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 80,
"usage_type": "call"
}
] |
23561493561
|
import scipy
import datetime
import matplotlib.pyplot as plt
import sys
from loader import DataLoader
import numpy as np
import os
from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from models.refiner import build_refiner
from models.classifier import build_classifier
from models.discriminator import build_discriminator, build_feature_discriminator
from models.encoder import build_encoder
class CGAN():
def __init__(self):
self.img_rows = 128
self.img_cols = 128
self.channels = 3
self.n_features = 128
self.n_classes = 31
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols), n_classes=self.n_classes)
optimizer = Adam(0.0002, 0.5)
self.D_R = build_discriminator(self.img_shape)
self.D_F = build_feature_discriminator(self.n_features)
self.D_R.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.D_F.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.Refiner = build_refiner(self.img_shape, self.channels)
self.Feature = build_encoder(self.img_shape, self.n_features)
self.Classifier = build_classifier(self.n_features, self.n_classes)
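        # Freeze the discriminators (and, once compiled, the classifier) so the stacked GAN models below only update the refiner and the feature encoder.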
self.D_R.trainable = False
self.D_F.trainable = False
self.Classifier.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.Classifier.trainable = False
self.GAN_1 = Sequential()
self.GAN_1.add(self.Refiner)
self.GAN_1.add(self.D_R)
self.GAN_1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.GAN_2 = Sequential()
self.GAN_2.add(self.Refiner)
self.GAN_2.add(self.Feature)
self.GAN_2.add(self.D_F)
self.GAN_2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.GAN_3 = Sequential()
self.GAN_3.add(self.Refiner)
self.GAN_3.add(self.Feature)
self.GAN_3.add(self.Classifier)
self.GAN_3.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
def train(self, epochs, batch_size=1, interval=50):
start_time = datetime.datetime.now()
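        # Adversarial labels: 1 for real/target samples, 0 for refined (fake) ones.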
valid = np.ones((batch_size,))
refined = np.zeros((batch_size,))
for epoch in range(epochs):
for batch_i, (imgs_sim, imgs_target, classes) in enumerate(self.data_loader.load_batch(batch_size)):
imgs_refined = self.Refiner.predict(imgs_sim)
feature_sim = self.Feature.predict(imgs_sim)
feature_target = self.Feature.predict(imgs_target)
feature_refined = self.Feature.predict(imgs_refined)
dimg_loss_real = self.D_R.train_on_batch(imgs_target, valid)
dimg_loss_refined = self.D_R.train_on_batch(imgs_refined, refined)
dimg_loss = 0.5 * np.add(dimg_loss_real, dimg_loss_refined)
dfeature_loss_real = self.D_F.train_on_batch(feature_target, valid)
dfeature_loss_refined = self.D_F.train_on_batch(feature_refined, refined)
dfeature_loss = 0.5 * np.add(dfeature_loss_real, dfeature_loss_refined)
class_loss = self.Classifier.train_on_batch(feature_sim, classes)
gan1_loss = self.GAN_1.train_on_batch(imgs_sim, valid)
gan2_loss = self.GAN_2.train_on_batch(imgs_sim, valid)
gan3_loss = self.GAN_3.train_on_batch(imgs_sim, classes)
elapsed_time = datetime.datetime.now() - start_time
print ("[Epoch %d/%d] [targetatch %d/%d] [DR loss: %f] [DF loss: %f] [C loss: %f] [GAN_1 loss %f] [GAN_2 loss %f] [GAN_3 loss %f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
dimg_loss[0],
dfeature_loss[0],
class_loss[0],
gan1_loss[0],
gan2_loss[0],
gan3_loss[0],
elapsed_time))
if batch_i % interval == 0:
self.sample_images(epoch, batch_i)
def sample_images(self, epoch, batch_i):
os.makedirs('output/', exist_ok=True)
r, c = 1, 3
imgs_sim = self.data_loader.load_data(domain="sim", batch_size=1, is_testing=True)
imgs_target = self.data_loader.load_data(domain="target", batch_size=1, is_testing=True)
imgs_refined = self.Refiner.predict(imgs_sim)
gen_imgs = np.concatenate([imgs_sim, imgs_refined, imgs_target])
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['Simulated', 'Refined','Target']
fig, axs = plt.subplots(r, c)
axs[0].imshow(gen_imgs[0])
axs[0].set_title(titles[0])
axs[0].axis('off')
axs[1].imshow(gen_imgs[1])
axs[1].set_title(titles[1])
axs[1].axis('off')
axs[2].imshow(gen_imgs[2])
axs[2].set_title(titles[2])
axs[2].axis('off')
fig.savefig("output/%d_%d.png" % (epoch, batch_i))
plt.close()
if __name__ == '__main__':
cgan = CGAN()
cgan.train(epochs=100, batch_size=8, interval=50)
|
faniyamokhayyeri/C-GAN
|
cgan.py
|
cgan.py
|
py
| 6,395 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "loader.DataLoader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.Adam",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.discriminator.build_discriminator",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.discriminator.build_feature_discriminator",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.refiner.build_refiner",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.encoder.build_encoder",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.classifier.build_classifier",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 146,
"usage_type": "name"
}
] |
12772858510
|
import argparse
import os
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
from .model import (
rnn_regression_model,
rnn_classification_model,
compile_regression_model,
compile_classification_model,
)
from .transform import (
sequence_embedding,
normalize, denormalize,
make_dataset_balanced,
one_hot_encode_classes,
split_train_test_set,
)
from .load import load_rna_structure_dataset, load_rna_nucleotides_dataset
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
parser = argparse.ArgumentParser()
parser.add_argument('learning_type', choices=['regression', 'classification'])
parser.add_argument('rna_type', choices=['mrna', 'trna', 'rrna'])
parser.add_argument('alphabet', choices=['nucleotides', '2d_structure'])
parser.add_argument('--resume', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_epochs', type=int, default=10)
parser.add_argument('--seed', type=int, default=444)
args = parser.parse_args()
learning_type = args.learning_type
rna_type = args.rna_type
alphabet_type = args.alphabet
learning_rate = args.learning_rate
batch_size = args.batch_size
n_epochs = args.n_epochs
resume = args.resume
seed = args.seed
np.random.seed(seed)
if alphabet_type == 'nucleotides':
alphabet = ['A', 'T', 'G', 'C']
else:
alphabet = ['.', '(', ')']
if learning_type == 'regression':
regression(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed)
else:
classification(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed)
def regression(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed):
alphabet_size = len(alphabet)
metadata_path = f'data/tab/{rna_type}.tab'
sequences_folder = 'data/seq/'
output_folder = os.path.join(os.getcwd(), 'saved_models_rnatemp', f'seed_{seed}')
output_path = os.path.join(output_folder, f'{rna_type}_regression.h5')
try:
os.makedirs(output_folder)
except FileExistsError:
pass
log_dir = os.path.join(os.getcwd(), 'summary_log', f'seed_{seed}', 'regression')
try:
os.makedirs(log_dir)
except FileExistsError:
pass
logger.info('Building model')
model = rnn_regression_model(alphabet_size=alphabet_size, n_lstm=2)
compile_regression_model(model, learning_rate=learning_rate)
if resume > 0:
logger.info(f'Resuming from {output_path}')
model.load_weights(output_path)
logger.info('Loading data')
metadata = pd.read_csv(metadata_path, delimiter='\t')
metadata['category'] = metadata['temp.cat']
y, metadata = make_dataset_balanced(
metadata,
output_col='temp',
)
y = y.astype(np.float32)
if 'A' in alphabet:
sequences = load_rna_nucleotides_dataset(metadata, sequences_folder)
else:
sequences = load_rna_structure_dataset(metadata, sequences_folder)
x = sequence_embedding(sequences, alphabet)
logger.info('Split train and test set')
x_train, y_train, x_test, y_test = split_train_test_set(x, y, test_ratio=0.2)
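    # Normalise the regression target with the mean/std computed over the full label set.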
mean, std = np.mean(y), np.std(y)
y_test_norm = normalize(y_test, mean, std)
y_train_norm = normalize(y_train, mean, std)
initial_epoch = 0
epochs = n_epochs
if resume > 0:
initial_epoch = resume
epochs += initial_epoch
logger.info('Training')
model.fit(
x_train,
y_train_norm,
validation_data=(x_test, y_test_norm),
batch_size=batch_size,
epochs=epochs,
initial_epoch=initial_epoch,
verbose=1,
callbacks=[
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
update_freq='epoch',
embeddings_freq=0,
),
],
)
model.save(output_path)
logger.info(f'Model saved to {output_path}')
def classification(rna_type, alphabet, learning_rate, batch_size, n_epochs, resume, seed):
alphabet_size = len(alphabet)
classes = ['psychrophile', 'mesophile', 'thermophile', 'hyperthermophile']
n_classes = len(classes)
metadata_path = f'data/tab/{rna_type}.tab'
sequences_folder = 'data/seq/'
output_folder = os.path.join(os.getcwd(), 'saved_models_rnatemp', f'seed_{seed}')
output_path = os.path.join(output_folder, f'{rna_type}_classification.h5')
try:
os.makedirs(output_folder)
except FileExistsError:
pass
log_dir = os.path.join(os.getcwd(), 'summary_log', f'seed_{seed}', 'classification')
try:
os.makedirs(log_dir)
except FileExistsError:
pass
logger.info('Building model')
model = rnn_classification_model(alphabet_size=alphabet_size, n_classes=n_classes, n_lstm=2)
compile_classification_model(model, learning_rate=learning_rate)
if resume > 0:
logger.info(f'Resuming from {output_path}')
model.load_weights(output_path)
logger.info('Loading data')
metadata = pd.read_csv(metadata_path, delimiter='\t')
metadata['category'] = metadata['temp.cat']
n_entries_per_class = 153
y_str, metadata = make_dataset_balanced(metadata)
y = one_hot_encode_classes(y_str, classes)
if 'A' in alphabet:
sequences = load_rna_nucleotides_dataset(metadata, sequences_folder)
else:
sequences = load_rna_structure_dataset(metadata, sequences_folder)
x = sequence_embedding(sequences, alphabet)
logger.info('Split train and test set')
x_train, y_train, x_test, y_test = split_train_test_set(x, y, test_ratio=0.2)
initial_epoch = 0
epochs = n_epochs
if resume > 0:
initial_epoch = resume
epochs += initial_epoch
logger.info('Training')
model.fit(
x_train,
y_train,
validation_data=(x_test, y_test),
batch_size=batch_size,
epochs=epochs,
initial_epoch=initial_epoch,
verbose=1,
callbacks=[
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
update_freq='epoch',
embeddings_freq=0,
),
],
)
model.save(output_path)
logger.info(f'Model saved to {output_path}')
if __name__ == '__main__':
main()
|
srom/rna_learn
|
rna_learn/archive/rnatemp_main.py
|
rnatemp_main.py
|
py
| 6,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "model.rnn_regression_model",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "model.compile_regression_model",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "model.load_weights",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "transform.make_dataset_balanced",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "load.load_rna_nucleotides_dataset",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "load.load_rna_structure_dataset",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "transform.sequence_embedding",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "transform.split_train_test_set",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "transform.normalize",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "transform.normalize",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "model.fit",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.TensorBoard",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "model.save",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "model.rnn_classification_model",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "model.compile_classification_model",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "model.load_weights",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "transform.make_dataset_balanced",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "transform.one_hot_encode_classes",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "load.load_rna_nucleotides_dataset",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "load.load_rna_structure_dataset",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "transform.sequence_embedding",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "transform.split_train_test_set",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "model.fit",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.TensorBoard",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "model.save",
"line_number": 220,
"usage_type": "call"
}
] |
7029192101
|
import argparse
import time
import os
import cv2
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torch.autograd import Variable
import models_x
class ImageAdaptive3DModel(nn.Module):
def __init__(self, dim=33):
super().__init__()
self.classifier = models_x.Classifier()
self.lut_0 = models_x.Generator3DLUT_identity()
self.lut_1 = models_x.Generator3DLUT_zero()
self.lut_2 = models_x.Generator3DLUT_zero()
self.trilinear_ = models_x.TrilinearInterpolation()
def load_weights(self, lut_weights="pretrained_models/sRGB/LUTs.pth", classifier_weights="pretrained_models/sRGB/classifier.pth"):
assert os.path.exists(lut_weights), "Unable to find lut weights"
assert os.path.exists(classifier_weights), "Unable to find classifier weights"
classifier_state_dict = torch.load(classifier_weights)
self.classifier.load_state_dict(classifier_state_dict)
luts_state_dict = torch.load(lut_weights)
self.lut_0.load_state_dict(luts_state_dict["0"])
self.lut_1.load_state_dict(luts_state_dict["1"])
self.lut_2.load_state_dict(luts_state_dict["2"])
def forward(self, image_input):
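        # Predict per-LUT blending weights with the classifier, combine the three learned 3D LUTs and apply the blended LUT via trilinear interpolation.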
pred = self.classifier(image_input).squeeze()
final_lut = pred[0] * self.lut_0.LUT + pred[1] * self.lut_1.LUT + pred[2] * self.lut_2.LUT
combine_A = image_input.new(image_input.size())
combine_A = self.trilinear_(final_lut, image_input)
return combine_A
class ImageAdaptive3DUnpairedModel(nn.Module):
def __init__(self, dim=33):
super().__init__()
self.classifier = models_x.Classifier_unpaired()
self.lut_0 = models_x.Generator3DLUT_identity()
self.lut_1 = models_x.Generator3DLUT_zero()
self.lut_2 = models_x.Generator3DLUT_zero()
def load_weights(self, lut_weights="pretrained_models/sRGB/LUTs_unpaired.pth", classifier_weights="pretrained_models/sRGB/classifier_unpaired.pth"):
assert os.path.exists(lut_weights), "Unable to find lut weights"
assert os.path.exists(classifier_weights), "Unable to find classifier weights"
classifier_state_dict = torch.load(classifier_weights)
self.classifier.load_state_dict(classifier_state_dict)
luts_state_dict = torch.load(lut_weights)
self.lut_0.load_state_dict(luts_state_dict["0"])
self.lut_1.load_state_dict(luts_state_dict["1"])
self.lut_2.load_state_dict(luts_state_dict["2"])
def forward(self, image_input):
pred = self.classifier(image_input).squeeze()
combine_A = pred[0] * self.lut_0(image_input) + pred[1] * self.lut_1(image_input) + pred[2] * self.lut_2(image_input)
# Standardize because paired model returns (LUT, output)
return None, combine_A
def pre_process(image: np.array, device: str) -> torch.tensor:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.astype(np.float32) / 255.
image = torch.from_numpy(np.ascontiguousarray(np.transpose(image, (2, 0, 1)))).float().unsqueeze(0)
# image = torch.stack([image])
image = image.to(device)
return image
def post_process(output_tensor):
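    # Convert the CHW float tensor in [0, 1] back to an HWC uint8 BGR image for OpenCV.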
image_rgb = output_tensor.cpu().squeeze().permute(1, 2, 0).numpy()
image_rgb = (image_rgb * 255.0).clip(0, 255).astype(np.uint8)
image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
return image_bgr
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str, required=True, help="Path to input folder containing images")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output folder")
parser.add_argument("--device", type=str, default="cuda:0", help="Device to use e.g. 'cuda:0', 'cuda:1', 'cpu'")
parser.add_argument("--unpaired", action="store_true", help="Evaluate model trained with unpaired data")
args = parser.parse_args()
# Prepare output directory if it doesn't exist
os.makedirs(args.output_dir, exist_ok=True)
# Load model and weights
model = ImageAdaptive3DModel() if not args.unpaired else ImageAdaptive3DUnpairedModel()
model.load_weights()
model.eval()
model.to(args.device)
# Prepare images
image_paths = [os.path.join(args.input_dir, img_path) for img_path in os.listdir(args.input_dir) if img_path[0] != "."]
# Model inference
with torch.no_grad():
description = "Running 3D-LUT..." if not args.unpaired else "Running 3D-LUT(unpaired)..."
for img_path in tqdm(image_paths, total=len(image_paths), desc=description):
in_image = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
model_input = pre_process(in_image, args.device)
_, model_output = model(model_input)
enhanced_image = post_process(model_output)
output_path = os.path.join(args.output_dir, os.path.basename(img_path))
cv2.imwrite(output_path, enhanced_image)
|
shaunhwq/Image-Adaptive-3DLUT
|
demo_3dlut.py
|
demo_3dlut.py
|
py
| 5,091 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models_x.Classifier",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_identity",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models_x.TrilinearInterpolation",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "models_x.Classifier_unpaired",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_identity",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models_x.Generator3DLUT_zero",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 127,
"usage_type": "call"
}
] |
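For orientation, here is a minimal sketch of reusing this row's helpers programmatically instead of through its CLI. It assumes demo_3dlut.py is importable from the working directory and that the pretrained weight files expected by load_weights() are present; the enhance_one wrapper itself is hypothetical and not part of the repository.

# Hypothetical wrapper around the pre_process/post_process helpers shown above.
import cv2
import torch

from demo_3dlut import ImageAdaptive3DModel, pre_process, post_process  # assumed importable

def enhance_one(path_in: str, path_out: str, device: str = "cpu") -> None:
    model = ImageAdaptive3DModel()
    model.load_weights()      # expects the repository's pretrained LUT/classifier weights
    model.eval()
    model.to(device)

    image_bgr = cv2.imread(path_in, cv2.IMREAD_UNCHANGED)   # BGR uint8, as in the demo loop
    with torch.no_grad():
        _, output = model(pre_process(image_bgr, device))   # forward() returns a (LUT, output) pair
    cv2.imwrite(path_out, post_process(output))              # back to BGR uint8 on disk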
43969738146
|
#!/usr/bin/env python
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation
from CPT_GFFParser import gffSeqFeature, gffWrite
bottomFeatTypes = ["exon", "RBS", "CDS"]
def makeGffFeat(inFeat, num, recName, identifier):
if inFeat.type == "RBS" or (inFeat.type == "regulatory" and "regulatory_class" in inFeat.qualifiers.keys() and inFeat.qualifiers["regulatory_class"][0] == "ribosome_binding_site"):
inFeat.type = "Shine_Dalgarno_sequence"
if "codon_start" in inFeat.qualifiers.keys():
shift = int(inFeat.qualifiers["codon_start"][0]) - 1
else:
shift = "."
if identifier in inFeat.qualifiers.keys():
name = inFeat.qualifiers[identifier][0] + "." + inFeat.type
if num > 0:
name += "." + str(num)
else:
name = recName + "." + inFeat.type + "." + str(num)
outFeat = gffSeqFeature(inFeat.location, inFeat.type, '', inFeat.strand, name, inFeat.qualifiers, None, None, None, shift, 0, "GbkToGff")
outFeat.qualifiers["ID"] = [name]
return outFeat
def main(inFile, makeMRNA, makeGene, identifier, fastaFile, outFile):
ofh = sys.stdout
if outFile:
ofh = outFile
outRec = []
failed = 0
for rec in SeqIO.parse(inFile, "genbank"):
recID = rec.name
if len(str(rec.seq)) > 0:
seqs_pending_writes = True
outSeq = str(rec.seq)
seqLen = len(outSeq)
locBucket = {}
outFeats = []
topTypeDict = {}
seekingParent = []
geneNum = 0
autoGeneNum = 0
for feat in rec.features:
if identifier not in feat.qualifiers.keys(): #Allow metadata features and other features with no ID (Output warning?) - AJC
if feat.type in bottomFeatTypes:
seekingParent.append([feat, [], []]) # [Feature, all parent candidates, strongest parent candidates]
continue
elif feat.type not in topTypeDict.keys():
topTypeDict[feat.type] = 1
else:
topTypeDict[feat.type] += 1
outFeats.append(makeGffFeat(feat, topTypeDict[feat.type], recID, identifier))
continue
elif feat.qualifiers[identifier][0] not in locBucket.keys():
locBucket[feat.qualifiers[identifier][0]] = []
locBucket[feat.qualifiers[identifier][0]].append(feat)
for locus in locBucket.keys():
minLoc = locBucket[locus][0].location.start
maxLoc = locBucket[locus][0].location.end
for feat in locBucket[locus]:
minLoc = min(minLoc, feat.location.start)
maxLoc = max(maxLoc, feat.location.end)
for x in seekingParent:
if x[0].location.start >= minLoc and x[0].location.end <= maxLoc:
x[1].append(locus)
if x[0].location.start == minLoc or x[0].location.end == maxLoc:
x[2].append(locus)
for x in seekingParent: #Reformat to [Feature, Locus, Unused/Free]
if len(x[2]) == 1:
finList = ""
if len(x[1]) > 1:
for loc in x[1]:
if loc != x[2][0]:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other, weaker candidate(s) were " + finList[0:-2] + "."
else:
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = x[2][0]
elif len(x[2]) > 1:
candidate = x[2][0] #Arbitrarily choose first one
finList = ""
strongList = ""
for loc in x[2]:
if loc != candidate:
finList += loc + ", "
strongList += loc + ", "
for loc in x[1]:
if loc not in x[2]:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other candidate(s) were " + finList[0:-2] + " (Equally strong candidate(s): " + strongList[0:-2] + ")."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = candidate
elif len(x[1]) == 1:
x[1] = x[1][0]
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived."
x[0].qualifiers["Notes"].append(finList)
elif len(x[1]) > 1:
candidate = x[1][0] #Arbitrarily choose first one
finList = ""
for loc in x[1]:
if loc != candidate:
finList += loc + ", "
finList = str(x[0].type) + " had no locus tag set in .gbk file, automatically derived. Other candidates were " + finList[0:-2] + "."
if "Notes" not in x[0].qualifiers.keys():
x[0].qualifiers["Notes"] = []
x[0].qualifiers["Notes"].append(finList)
x[1] = candidate
else:
if makeGene:
sys.stderr.write("Warning: Unable to find potential parent for feature with no " + identifier + " of type " + str(x[0].type) + " at location [" + str(x[0].location.start + 1) + ", " + str(x[0].location.end) + "], creating standalone gene.\n")
autoGeneNum += 1
x[0].source = "GbkToGff"
x[0].score = 0
x[0].shift = 0
if "ID" not in x[0].qualifiers.keys():
x[0].qualifiers["ID"] = [recID + ".standalone_" + x[0].type + "." + str(autoGeneNum)]
tempName = recID + ".derived_Gene." + str(autoGeneNum)
tempQuals = {"ID" : [tempName], "Notes" : ["Gene feature automatically generated by Gbk to GFF conversion"]}
tempGene = gffSeqFeature(FeatureLocation(x[0].location.start, x[0].location.end, x[0].location.strand), 'gene', '', x[0].strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
if makeMRNA:
tempName = recID + ".derived_mRNA." + str(autoGeneNum)
tempQuals = {"ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
tempGene.sub_features.append(gffSeqFeature(FeatureLocation(x[0].location.start, x[0].location.end, x[0].location.strand), 'mRNA', '', x[0].strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff"))
tempGene.sub_features[-1].sub_features.append(x[0])
else:
tempGene.sub_features.append(x[0])
outFeats.append(tempGene)
else:
sys.stderr.write("Warning: Unable to find potential parent for feature with no " + identifier + " of type " + str(x[0].type) + " at location [" + str(x[0].location.start + 1) + ", " + str(x[0].location.end) + "].\n")
if x[0].type not in topTypeDict.keys():
topTypeDict[x[0].type] = 1
else:
topTypeDict[x[0].type] += 1
outFeats.append(makeGffFeat(x[0], topTypeDict[x[0].type], recID, identifier))
for locus in locBucket.keys():
if len(locBucket[locus]) == 1: # No hierarchy to be made
outFeats.append(makeGffFeat(locBucket[locus][0], 0, recID, identifier))
continue
topFeat = None
midFeat = None
bottomFeats = []
typeDict = {}
minLoc = locBucket[locus][0].location.start
maxLoc = locBucket[locus][0].location.end
geneNum += 1
for feat in locBucket[locus]:
# If we want to make our own top-level feat?
minLoc = min(minLoc, feat.location.start)
maxLoc = max(maxLoc, feat.location.end)
# Gene->mRNA->CDS included as example, to add other feature-hierarchies in the appropriate slot
if feat.type in ['gene']:
if not topFeat:
topFeat = feat
# Else handle multiple top features
elif feat.type in ['mRNA', 'tRNA', 'rRNA']:
if not midFeat:
midFeat = feat
# Else handle multiple mid feats (May need another elif type-in-list statement if we actually expect a list of mid feats)
else:
if feat.type not in typeDict.keys():
typeDict[feat.type] = 1
else:
typeDict[feat.type] += 1
bottomFeats.append(feat)
for x in seekingParent:
if not isinstance(x[1], list) and locus == x[1]: # x[1] is still a list only if no parent locus was assigned
x[0].qualifiers[identifier] = [locus]
bottomFeats.append(x[0])
if x[0].type not in typeDict.keys():
typeDict[x[0].type] = 1
else:
typeDict[x[0].type] += 1
#if not topFeat: # Make our own top-level feature based off minLoc, maxLoc bounds
for x in typeDict.keys(): # If only 1, set it to 0 so we don't append a number to the name
if typeDict[x] == 1: # Else, set to 1 so that we count up as we encounter the features
typeDict[x] = 0
else:
typeDict[x] = 1
if not topFeat:
if makeGene:
if midFeat:
possibleStrand = midFeat.strand
else:
possibleStrand = bottomFeats[0].strand
tempName = recID + ".gene." + str(geneNum)
tempQuals = {identifier : [locus], "ID" : [tempName], "Notes" : ["Gene feature automatically generated by Gbk to GFF conversion"]}
topFeat = gffSeqFeature(FeatureLocation(minLoc, maxLoc, possibleStrand), 'gene', '', possibleStrand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
else:
sys.stderr.write("Unable to create a feature heirarchy at location [%d, %d] with features: \n" % (minLoc, maxLoc))
for x in locBucket[locus]:
sys.stderr.write(str(x))
sys.stderr.write('\n')
failed = 1
continue
outFeats.append(makeGffFeat(topFeat, 0, recID, identifier))
if not midFeat and topFeat.type == "gene" and makeMRNA:
if identifier in topFeat.qualifiers.keys():
tempName = topFeat.qualifiers[identifier][0] + ".mRNA"
tempQuals = {identifier : topFeat.qualifiers[identifier], "ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
else:
tempName = outFeats[-1].ID + ".mRNA"
tempQuals = {identifier : topFeat.qualifiers[identifier], "ID" : [tempName], "Notes" : ["mRNA feature automatically generated by Gbk to GFF conversion"]}
midFeat = gffSeqFeature(FeatureLocation(minLoc, maxLoc, topFeat.strand), 'mRNA', '', topFeat.strand, tempName, tempQuals, None, None, None, ".", 0, "GbkToGff")
if midFeat: # Again, need a new if statement if we want to handle multiple mid-tier features
outFeats[-1].sub_features.append(makeGffFeat(midFeat, 0, recID, identifier))
outFeats[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].id]
for x in bottomFeats:
typeDict[x.type] += 1
outFeats[-1].sub_features[-1].sub_features.append(makeGffFeat(x, typeDict[x.type], recID, identifier))
outFeats[-1].sub_features[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].sub_features[-1].id]
else: # No midFeat, append bottom feats directly to top feats
for x in bottomFeats:
typeDict[x.type] += 1
outFeats[-1].sub_features.append(makeGffFeat(x, typeDict[x.type], recID, identifier))
outFeats[-1].sub_features[-1].qualifiers["Parent"] = [outFeats[-1].id]
outRec.append(SeqRecord(rec.seq, recID, rec.name, rec.description, rec.dbxrefs, sorted(outFeats, key=lambda x: x.location.start), rec.annotations, rec.letter_annotations))
SeqIO.write([outRec[-1]], fastaFile, "fasta")
gffWrite(outRec, ofh)
exit(failed) # 0 if all features handled, 1 if unable to handle some
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Biopython solution to Gbk to GFF conversion')
parser.add_argument('inFile', type=argparse.FileType("r"), help='Path to an input GBK file' )
parser.add_argument('--makeMRNA', action="store_true", required=False, help="Automatically create mRNA features")
parser.add_argument('--makeGene', action="store_true", required=False, help="Automatically create missing Gene features")
parser.add_argument('--identifier', type=str, default="locus_tag", required=False, help="Qualifier to derive ID property from")
parser.add_argument('--fastaFile', type=argparse.FileType("w"), help='Fasta output for sequences' )
parser.add_argument('--outFile', type=argparse.FileType("w"), help='GFF feature output' )
args = parser.parse_args()
main(**vars(args))
|
TAMU-CPT/galaxy-tools
|
tools/gbk/gbk_to_gff3.py
|
gbk_to_gff3.py
|
py
| 13,589 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "CPT_GFFParser.gffSeqFeature",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "Bio.SeqFeature.FeatureLocation",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "Bio.SeqRecord.SeqRecord",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO.write",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "CPT_GFFParser.gffWrite",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 264,
"usage_type": "call"
}
] |
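As a usage note, the converter above is written as a command-line tool, but main() can also be driven directly with open file handles, mirroring what argparse.FileType would pass in. The sketch below is illustrative only: the file names are invented, and because main() ends with exit(failed), a programmatic caller should expect a SystemExit.

# Hypothetical programmatic invocation of the GenBank-to-GFF3 converter defined above.
from gbk_to_gff3 import main

with open("phage.gbk") as gbk, open("phage.fasta", "w") as fasta, open("phage.gff3", "w") as gff:
    try:
        main(inFile=gbk, makeMRNA=True, makeGene=True,
             identifier="locus_tag", fastaFile=fasta, outFile=gff)
    except SystemExit as status:
        print("conversion finished with exit code", status.code)  # 0 = clean, 1 = unresolved features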
31235810811
|
from django.urls import path, include
from rest_framework import routers
from aluraflix.views import VideoViewSet, CategoriaViewSet, CategoriaVideosViewSet, VideosFreeViewSet
router = routers.DefaultRouter()
router.register('videos', VideoViewSet, basename='videos')
router.register('categorias', CategoriaViewSet, basename='categorias')
urlpatterns = [
path('videos/free/', VideosFreeViewSet.as_view(), name='videos_free_list'),
path('categorias/<int:id>/videos/', CategoriaVideosViewSet.as_view(), name='videos_categoria_list'),
path('', include(router.urls)),
]
|
diegoamferreira/challange_alura_be1
|
aluraflix/urls.py
|
urls.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "aluraflix.views.VideoViewSet",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "aluraflix.views.CategoriaViewSet",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.VideosFreeViewSet.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.VideosFreeViewSet",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.CategoriaVideosViewSet.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aluraflix.views.CategoriaVideosViewSet",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 13,
"usage_type": "call"
}
] |
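A quick way to sanity-check the routing above is to reverse the named URLs. Note that the explicit videos/free/ and categorias/<int:id>/videos/ paths are declared before the router include, so they take precedence over the router's own patterns. The snippet assumes this module is mounted as the project's root URLconf, which the row does not show.

# Illustrative reverse() checks for the routes declared above (root-URLconf assumption).
from django.urls import reverse

assert reverse("videos_free_list") == "/videos/free/"
assert reverse("videos_categoria_list", kwargs={"id": 3}) == "/categorias/3/videos/"
assert reverse("videos-list") == "/videos/"          # name derived by DefaultRouter from basename
assert reverse("categorias-detail", kwargs={"pk": 1}) == "/categorias/1/"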
3919544072
|
# standard python libraries
import os
import re
import csv
import json
import operator
import statistics
import collections
from operator import itemgetter
# custom libraries
from webxray.Analyzer import Analyzer
from webxray.Utilities import Utilities
class Reporter:
"""
Manages the production of a number of CSV reports.
"""
def __init__(self, db_name, db_engine, num_tlds, num_results, tracker_threshold = None, flush_domain_owners = True, start_date = False, end_date = False):
"""
This performs a few start-up tasks:
- sets up some useful global variables
- makes sure we have a directory to store the reports
- flushes the existing domain_owner mappings (this can be disabled)
- if we want to do per-tld reports, figures out the most common
- if we want to filter against a given tracker threshold, sets it
up here (see documentation below for tracker threshold)
"""
# set various global vars
self.db_name = db_name
self.num_tlds = num_tlds
self.num_results = num_results
self.tracker_threshold = tracker_threshold
# pass utilities the database info
self.utilities = Utilities(db_name, db_engine)
# set up the analyzer we will be using throughout
self.analyzer = Analyzer(db_name, db_engine)
# number of decimal places to round to in reports
self.num_decimals = 2
# set up global db connection
if db_engine == 'sqlite':
from webxray.SQLiteDriver import SQLiteDriver
self.sql_driver = SQLiteDriver(db_name)
elif db_engine == 'postgres':
from webxray.PostgreSQLDriver import PostgreSQLDriver
self.sql_driver = PostgreSQLDriver(db_name)
else:
print('INVALID DB ENGINE FOR %s, QUITTING!' % db_engine)
quit()
print('\t=============================')
print('\t Checking Output Directories ')
print('\t=============================')
# creates a new directory if it doesn't exist already
self.report_path = self.utilities.setup_report_dir(self.db_name)
# this is used in various places to get owner information
self.domain_owners = self.utilities.get_domain_owner_dict()
# if we want to get sub-reports for the most frequent tlds we find
# them here
if self.num_tlds:
print('\t=====================')
print('\t Getting top %s tlds' % self.num_tlds)
print('\t=====================')
print('\t\tProcessing...', end='', flush=True)
self.top_tlds = self.analyzer.get_top_tlds(self.num_tlds)
print('done!')
print('\t\tThe top tlds are:')
for tld in self.top_tlds:
if tld: print('\t\t |- %s' % tld)
else:
self.top_tlds = [None]
# __init__
#####################
# REPORT GENERATORS #
#####################
def generate_db_summary_report(self,print_to_cli=True):
"""
outputs and stores report of basic data about how many records in db, etc.
"""
print('\t================')
print('\t General Summary')
print('\t================')
# get the relevant db summary data
db_summary = self.analyzer.get_db_summary()
# print to cli
if print_to_cli:
print("\t\tTotal Crawls:\t\t\t%s" % db_summary['total_crawls_ok'])
print("\t\tTotal Pages:\t\t\t%s" % db_summary['total_pages_ok'])
print("\t\tTotal Tasks Fail:\t\t%s" % db_summary['total_tasks_fail'])
print("\t\tTotal Tasks Attempted:\t\t%s" % db_summary['total_tasks_attempted'])
print("\t\t%% Pages OK:\t\t\t%.2f%%" % db_summary['percent_tasks_ok'])
print("\t\tTotal Errors:\t\t\t%s" % db_summary['total_errors'])
print("\t\tTotal Cookies:\t\t\t%s" % db_summary['total_cookies'])
print("\t\tTotal 3P Cookies:\t\t%s" % db_summary['total_3p_cookies'])
print("\t\tTotal Dom Storage:\t\t%s" % db_summary['total_dom_storage'])
print("\t\tTotal Websockets:\t\t%s" % db_summary['total_websockets'])
print("\t\tTotal Websocket Events:\t\t%s" % db_summary['total_websocket_events'])
print("\t\tTotal Requests:\t\t\t%s" % db_summary['total_requests'])
print("\t\tTotal Responses:\t\t%s" % db_summary['total_responses'])
print('\t\t%% Requests Received:\t\t%.2f%%' % db_summary['percent_requests_received'])
print("\t\t3P Requests:\t\t\t%s" % db_summary['total_3p_requests'])
print("\t\t3P Responses:\t\t\t%s" % db_summary['total_3p_responses'])
print('\t\t%% 3P Requests Received:\t\t%.2f%%' % db_summary['percent_3p_requests_received'])
print('\t\t'+'-'*40)
# write results to csv
csv_rows = []
csv_rows.append(('total_crawls_ok', db_summary['total_crawls_ok']))
csv_rows.append(('total_pages_ok', db_summary['total_pages_ok']))
csv_rows.append(('total_tasks_fail', db_summary['total_tasks_fail']))
csv_rows.append(('total_tasks_attempted', db_summary['total_tasks_attempted']))
csv_rows.append(('percent_pages_ok', db_summary['percent_tasks_ok']))
csv_rows.append(('total_errors', db_summary['total_errors']))
csv_rows.append(('total_cookies', db_summary['total_cookies']))
csv_rows.append(('total_3p_cookies', db_summary['total_3p_cookies']))
csv_rows.append(('total_dom_storage', db_summary['total_dom_storage']))
csv_rows.append(('total_websockets', db_summary['total_websockets']))
csv_rows.append(('total_websocket_events', db_summary['total_websocket_events']))
csv_rows.append(('total_requests', db_summary['total_requests']))
csv_rows.append(('total_responses', db_summary['total_responses']))
csv_rows.append(('percent_requests_received', db_summary['percent_requests_received']))
csv_rows.append(('total_3p_requests', db_summary['total_3p_requests']))
csv_rows.append(('total_3p_responses', db_summary['total_3p_responses']))
csv_rows.append(('percent_3p_requests_received', db_summary['percent_3p_requests_received']))
self.utilities.write_csv(self.report_path,'db_summary.csv', csv_rows)
# generate_db_summary_report
def generate_stats_report(self):
"""
High level stats
"""
print('\t=============================')
print('\t Processing High-Level Stats ')
print('\t=============================')
for tld_filter in self.top_tlds:
csv_rows = []
if tld_filter:
stats = self.analyzer.get_high_level_stats(tld_filter)
else:
stats = self.analyzer.get_high_level_stats()
if self.tracker_threshold:
filter_depth = self.tracker_threshold
else:
filter_depth = 'no_filter_used'
csv_rows.append(('n_pages', stats['total_pages']))
csv_rows.append(('n_crawls', stats['total_crawls']))
csv_rows.append(('%_pages_ssl', stats['percent_pages_ssl']))
csv_rows.append(('n_requests_received', stats['total_requests_received']))
csv_rows.append(('%_requests_received_ssl', stats['percent_requests_ssl']))
csv_rows.append(('n_1p_requests_received', stats['total_requests_received_1p']))
csv_rows.append(('%_1p_requests_received_ssl', stats['percent_1p_requests_ssl']))
csv_rows.append(('n_3p_requests_received', stats['total_requests_received_3p']))
csv_rows.append(('%_3p_requests_received_ssl', stats['percent_3p_requests_ssl']))
csv_rows.append(('average_page_load_time', stats['average_page_load_time']))
csv_rows.append(('%_w/3p_request', stats['percent_w_3p_request']))
csv_rows.append(('%_w/3p_cookie', stats['percent_w_3p_cookie']))
csv_rows.append(('%_w/3p_script', stats['percent_w_3p_script']))
csv_rows.append(('mean_3p_domains', stats['3p_domains_mean']))
csv_rows.append(('median_3p_domains', stats['3p_domains_median']))
csv_rows.append(('mode_3p_domains', stats['3p_domains_mode']))
csv_rows.append(('mean_3p_cookies', stats['3p_cookies_mean']))
csv_rows.append(('median_3p_cookies', stats['3p_cookies_median']))
csv_rows.append(('mode_3p_cookies', stats['3p_cookies_mode']))
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-stats.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'stats.csv',csv_rows)
# generate_stats_report
def generate_aggregated_tracking_attribution_report(self):
"""
generates ranked list of which entities collect data
from the greatest number of crawls ('aggregated_tracking_attribution.csv')
- entities which have subsidiaries are ranked according
to the crawls their subsidiaries get data from as well
- however, parent entities only get one hit on
a crawl which has multiple subsidiaries present
- for example, if a crawl has 'google analytics' and 'doubleclick'
that is only one hit for 'google'
"""
print('\t======================================')
print('\t Processing Aggregated Tracking Report ')
print('\t======================================')
for tld_filter in self.top_tlds:
csv_rows = []
# write out data to csv
for item in self.analyzer.get_aggregated_tracking_attribution(tld_filter):
csv_rows.append((
item['percent_crawls'],
item['owner_name'],
item['owner_country'],
self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
)
)
# we want to first sort by owner name and then by percentage
# to account for cases where two owners have the same percentage value
csv_rows.sort(key=lambda x: x[1].lower())
csv_rows.sort(key=lambda x: x[0],reverse=True)
# insert header row after sort
csv_rows.insert(0, ('percentage_crawls_tracked','owner','owner_country','owner_lineage'))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-aggregated_tracking_attribution.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'aggregated_tracking_attribution.csv',csv_rows)
# generate_aggregated_tracking_attribution_report
def generate_aggregated_3p_ssl_use_report(self):
"""
this report tells us the percentage of requests made to a given
third-party are encrypted
"""
print('\t=========================================')
print('\t Processing Aggregated 3P SSL Use Report ')
print('\t=========================================')
for tld_filter in self.top_tlds:
csv_rows = []
for item in self.analyzer.get_aggregated_3p_ssl_use(tld_filter):
csv_rows.append((
item['ssl_use'],
item['owner_name'],
item['owner_country'],
self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
))
# we want to first sort by owner name and then by percentage
# to account for cases where two owners have the same percentage value
csv_rows.sort(key=lambda x: x[1].lower())
csv_rows.sort(key=lambda x: x[0],reverse=True)
# insert header row after sort
csv_rows.insert(0, ('percent_requests_encrypted','owner','owner_country','owner_lineage'))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_ssl_use.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_ssl_use.csv',csv_rows)
# generate_aggregated_3p_ssl_use_report
def generate_3p_domain_report(self):
"""
This report tells us the most commonly occurring third-party domains.
"""
print('\t==============================')
print('\t Processing 3P Domains Report ')
print('\t==============================')
for tld_filter in self.top_tlds:
csv_rows = []
csv_rows.append(('percent_total','domain','owner','owner_country', 'owner_lineage'))
# get_3p_domain_percentages returns a list, we slice it to get only desired num_results
for item in self.analyzer.get_3p_domain_percentages(tld_filter)[:self.num_results]:
# figure out the lineage string if we know who owns the domain
if item['owner_id'] != None:
lineage_string = self.utilities.get_domain_owner_lineage_combined_string(item['owner_id'])
else:
lineage_string = None
csv_rows.append((
item['percent_crawls'],
item['domain'],
item['owner_name'],
item['owner_country'],
lineage_string
))
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_domains.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_domains.csv',csv_rows)
# generate_3p_domain_report
def generate_3p_request_report(self,request_type=None):
"""
this queries the db to get all requests, domains, or domain owners
next they are counted to find the most common
and formatted to csv rows and returned
"""
if request_type == 'script':
print('\t=============================')
print('\t Processing 3P Script Report ')
print('\t=============================')
else:
print('\t==============================')
print('\t Processing 3P Request Report ')
print('\t==============================')
for tld_filter in self.top_tlds:
csv_rows = []
csv_rows.append(('percent_total','request','type','domain','owner','owner_country','owner_lineage'))
# get_3p_request_percentages returns a list, we slice it to get only desired num_results
for item in self.analyzer.get_3p_request_percentages(tld_filter,request_type)[:self.num_results]:
# figure out the lineage string if we know who owns the domain
if item['request_owner_id'] != None:
lineage_string = self.utilities.get_domain_owner_lineage_combined_string(item['request_owner_id'])
else:
lineage_string = None
csv_rows.append((
item['percent_crawls'],
item['request_url'],
item['request_type'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country'],
lineage_string
))
if tld_filter:
if request_type:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_'+request_type+'.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_request.csv',csv_rows)
else:
if request_type:
self.utilities.write_csv(self.report_path,'3p_'+request_type+'.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_request.csv',csv_rows)
# generate_3p_request_report
def generate_data_transfer_report(self):
"""
These reports tell us how much data was transferred across several dimensions
"""
print('\t==================================')
print('\t Processing Data Transfer Reports ')
print('\t==================================')
for tld_filter in self.top_tlds:
# set up filter and file names
if tld_filter:
summary_file_name = tld_filter+'-data_xfer_summary.csv'
domain_file_name = tld_filter+'-data_xfer_by_domain.csv'
aggregated_file_name = tld_filter+'-data_xfer_aggregated.csv'
else:
summary_file_name = 'data_xfer_summary.csv'
domain_file_name = 'data_xfer_by_domain.csv'
aggregated_file_name = 'data_xfer_aggregated.csv'
# get the data from db, tuple of (response_domain, size, is_3p (boolean), domain_owner_id)
response_sizes = self.sql_driver.get_response_sizes()
# initialize vars
first_party_data = 0
third_party_data = 0
total_data = 0
# need Counter object, allows sorting later
domain_data = collections.Counter()
owner_data = collections.Counter()
# process each row
for item in response_sizes:
response_domain = item[0]
response_size = item[1]
response_is_3p = item[2]
domain_owner_id = item[3]
# this is the measure of all data downloaded
total_data += response_size
# measures for third and first party data
if response_is_3p:
third_party_data += response_size
else:
first_party_data += response_size
# data by domain, increment if already in there, otherwise new entry
if response_domain in domain_data:
domain_data[response_domain] += response_size
else:
domain_data[response_domain] = response_size
# only if we know the owner, increment
if domain_owner_id:
for lineage_id in self.utilities.get_domain_owner_lineage_ids(domain_owner_id):
if lineage_id in owner_data:
owner_data[lineage_id] += response_size
else:
owner_data[lineage_id] = response_size
# avoid divide-by-zero
if total_data == 0:
print('\t\tTotal data is zero, no report')
return
# output data to csv
summary_data_csv = []
summary_data_csv.append(('party','percent_total','data_transfered_bytes'))
summary_data_csv.append(('all','100',total_data))
summary_data_csv.append((
'First',
round((first_party_data/total_data)*100, self.num_decimals),
first_party_data))
summary_data_csv.append((
'Third',
round((third_party_data/total_data)*100, self.num_decimals),
third_party_data))
self.utilities.write_csv(self.report_path,summary_file_name, summary_data_csv)
# sort and output ranked data
domain_data = domain_data.most_common()
domain_data.sort()
domain_data.sort(reverse=True, key=lambda item:item[1])
# for csv data
domain_data_csv = []
domain_data_csv.append(('percent_total','domain','data_transfered_bytes'))
# if num_results is None we get everything, otherwise stops at limit
for item in domain_data[:self.num_results]:
domain_data_csv.append((
round((item[1]/total_data)*100,self.num_decimals),
item[0],
item[1]))
self.utilities.write_csv(self.report_path,domain_file_name, domain_data_csv)
owner_data = self.utilities.get_most_common_sorted(owner_data)
owner_data_csv = []
owner_data_csv.append(('percent_total','owner','owner_country','owner_lineage','data_transfered_bytes'))
# get results for all known owners
for item in owner_data:
owner_data_csv.append((
round((item[1]/total_data)*100,self.num_decimals),
self.domain_owners[item[0]]['owner_name'],
self.domain_owners[item[0]]['country'],
self.utilities.get_domain_owner_lineage_combined_string(item[0]),
item[1]
))
self.utilities.write_csv(self.report_path,aggregated_file_name, owner_data_csv)
# generate_data_transfer_report
def generate_use_report(self):
"""
This function handles the process of generating a csv report which details
what percentage of pages use third-party content for specific uses,
the number of requests made for a given type of use on a per-page basis,
and the percentage of such requests which correspond to a third-party
cookie.
"""
print('\t==========================')
print('\t Processing 3P Use Report ')
print('\t==========================')
for tld_filter in self.top_tlds:
use_data = self.analyzer.get_3p_use_data(tld_filter)
all_uses = use_data['all_uses']
percentage_by_use = use_data['percentage_by_use']
average_use_occurance_per_page = use_data['average_use_occurance_per_crawl']
percentage_use_w_cookie = use_data['percentage_use_w_cookie']
percentage_use_ssl = use_data['percentage_use_ssl']
csv_rows = []
csv_rows.append(('use_category','percent_crawls_w_use','ave_occurances_per_page','percentage_of_use_w_cookie', 'percentage_of_use_ssl'))
for use in sorted(all_uses):
if percentage_by_use[use] != None:
csv_rows.append((
use,
percentage_by_use[use],
average_use_occurance_per_page[use],
percentage_use_w_cookie[use],
percentage_use_ssl[use]
))
else:
csv_rows.append((use,None,None,None,None))
# write out csv with tld prefix if applicable
if tld_filter:
self.utilities.write_csv(self.report_path,tld_filter+'-3p_uses.csv',csv_rows)
else:
self.utilities.write_csv(self.report_path,'3p_uses.csv',csv_rows)
# generate_use_report
def generate_per_page_network_report(self):
"""
this report generates data necessary for graph/network analysis by
outputting a list of page domains and the requests/owners they connect to
on a per-page basis
"""
print('\t====================================')
print('\t Processing Per-Page Network Report ')
print('\t====================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append(('page_start_url','page_final_url','page_accessed','3p_request_domain','3p_domain_owner','3p_domain_owner_country'))
# process all records
for item in self.analyzer.get_page_to_3p_network():
csv_rows.append((
item['page_start_url'],
item['page_final_url'],
item['page_accessed'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country']
))
self.utilities.write_csv(self.report_path,'per_page_network_report.csv', csv_rows)
# generate_per_page_network_report
def generate_per_site_network_report(self):
"""
this report generates data necessary for graph/network analysis by
outputting a list of page domains and the requests/owners they connect to
aggregated on a per-site basis (eg combining all pages)
"""
print('\t================================')
print('\t Processing Site Network Report ')
print('\t================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append(('page_domain','3p_request_domain','3p_domain_owner','3p_domain_owner_country'))
for item in self.analyzer.get_site_to_3p_network():
csv_rows.append((
item['page_domain'],
item['request_domain'],
item['request_owner_name'],
item['request_owner_country']
))
self.utilities.write_csv(self.report_path,'per_site_network_report.csv', csv_rows)
# generate_per_site_network_report
def generate_all_pages_request_dump(self):
"""
Full dump of all requests loaded by all pages across all load times.
Default is 3p only, can be overridden.
"""
print('\t===================================')
print('\t Processing All Pages request Dump ')
print('\t===================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'accessed',
'start_url',
'final_url',
'request_url',
'request_domain',
'domain_owner'
))
# process all records
for item in self.analyzer.get_all_pages_requests():
csv_rows.append((
item['accessed'],
item['start_url'],
item['final_url'],
item['request_url'],
item['request_domain'],
item['request_domain_owner']
))
self.utilities.write_csv(self.report_path,'all_pages_request_dump.csv', csv_rows)
# generate_all_pages_request_dump
def generate_all_pages_cookie_dump(self):
"""
Full dump of all cookies loaded by all pages across all load times.
Default is 1p and 3p, can be overridden to 3p only.
"""
print('\t==================================')
print('\t Processing All Pages Cookie Dump ')
print('\t==================================')
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'accessed',
'start_url',
'final_url',
'cookie_domain',
'cookie_owner',
'cookie_name',
'cookie_value'
))
# process all records
for item in self.analyzer.get_all_pages_cookies():
csv_rows.append((
item['accessed'],
item['start_url'],
item['final_url'],
item['cookie_domain'],
item['cookie_owner'],
item['cookie_name'],
item['cookie_value']
))
self.utilities.write_csv(self.report_path,'all_pages_cookie_dump.csv', csv_rows)
# generate_all_pages_cookie_dump
def generate_site_host_report(self):
"""
First, we update the domain table with the owners
of the various ip addresses which gives us
a mapping of pages to hosts.
Second, we generate a network report for
site domains to hosts.
"""
print('\t=====================')
print('\t Updating Site Hosts ')
print('\t=====================')
self.analyzer.update_site_hosts()
print('\t==============================')
print('\t Generating Site Host Network ')
print('\t==============================')
site_host_data = self.analyzer.get_site_host_network()
if len(site_host_data) == 0:
print('\t\tNo site host data, skipping report.')
return
# put output here
csv_rows = []
# header row for csv
csv_rows.append((
'page_domain',
'host_name'
))
for item in site_host_data:
csv_rows.append((
item['site_domain'],
item['host_name']
))
self.utilities.write_csv(self.report_path,'site_hosts-network.csv', csv_rows)
print('\t============================================')
print('\t Generating Aggregate Host Ownership Report ')
print('\t============================================')
owner_occurances = []
for owner, in self.sql_driver.get_ip_owners():
owner_occurances.append(owner)
csv_rows = [('owner','percent_sites_w_owner')]
for item in self.utilities.get_most_common_sorted(owner_occurances):
csv_rows.append((item[0],100*(item[1]/len(owner_occurances))))
self.utilities.write_csv(self.report_path,'site_hosts-aggregated.csv', csv_rows)
# generate_site_host_report
##############
# POLICYXRAY #
##############
def initialize_policy_reports(self):
"""
Run various pre-production steps.
"""
print('\t====================================')
print('\t Updating 3p Domain Disclosure Data ')
print('\t====================================')
#self.analyzer.update_request_disclosure()
self.analyzer.update_crawl_disclosure()
print('\t\t...done!')
print('\t======================================')
print('\t Getting Policy Types List and Counts ')
print('\t======================================')
# pre-populate with 'None' which gives all policies
self.policy_types = [
{
'type' : None,
'count' : self.analyzer.get_policy_count()
}
]
for policy_type, in self.sql_driver.get_available_policy_types():
self.policy_types.append({
'type': policy_type,
'count': self.analyzer.get_policy_count(policy_type=policy_type)
})
print('\t\t...done!')
# initialize_policy_reports
def generate_policy_summary_report(self):
"""
Conducts preliminary analysis steps, determines what types of
policies we have, and then initiates the pertinent reports.
"""
print('\t==================================')
print('\t Generating Policy Summary Report ')
print('\t==================================')
# header row
csv_rows = [('Type','N','Word Count','FK Grade','FRE', '% 3P Disclosed')]
# get results for each policy_type
for policy_type in self.policy_types:
# makes reports clearer than 'None'
if policy_type['type'] == None:
this_policy_type = 'all'
else:
this_policy_type = policy_type['type']
print('\t\tProcessing %s...' % this_policy_type, end='', flush=True)
# fetch results
readability_scores = self.analyzer.get_readability_scores(policy_type=policy_type['type'])
csv_rows.append((
this_policy_type,
policy_type['count'],
self.analyzer.get_average_policy_word_count(policy_type=policy_type['type']),
readability_scores['ave_fkg'],
readability_scores['ave_fre'],
self.analyzer.get_percent_crawl_3p_domains_disclosed(policy_type=policy_type['type'])
))
print('done!')
self.utilities.write_csv(self.report_path,'policy-summary.csv', csv_rows)
# generate_policy_summary_report
def generate_policy_owner_disclosure_reports(self):
"""
Determines what types of policies we have, and then
initiates the pertinent reports.
"""
print('\t======================================')
print('\t Generating Company Disclosure Report ')
print('\t======================================')
# header row
csv_rows = [('Type','N','%% 3P Disclosed')]
print('\t\tProcessing ...', end='', flush=True)
company_results = self.analyzer.get_disclosure_by_request_owner()
csv_rows = [('Domain Owner','Total Occurances','Total Disclosures','Percent Disclosed')]
for item in company_results:
csv_rows.append((item,company_results[item][0],company_results[item][1],round(company_results[item][2],2)))
print('done!')
self.utilities.write_csv(self.report_path,'policy-owner_disclosure.csv',csv_rows)
# generate_policy_owner_disclosure_reports
def generate_policy_gdpr_report(self):
"""
Determine percentage of all policy types
that contain gdpr article 9 terms.
"""
print('\t==============================')
print('\t Generating GDPR Term Report ')
print('\t==============================')
term_list = [
'racial or ethnic origin', 'political opinions',
'religious or philosophical beliefs', 'trade union membership',
'genetic data', 'biometric data',
'data concerning health', 'sex life',
'sexual orientation'
]
self.generate_terms_report('policy-gdpr_terms.csv',term_list)
# generate_policy_gdpr_report
def generate_policy_pacification_report(self):
"""
Determine percentage of all policy types
that contain pacification terms.
"""
print('\t=====================================')
print('\t Generating Pacification Term Report ')
print('\t=====================================')
term_list = ['we value', 'we respect', 'important to us', 'help you', 'we care', 'committed to protecting', 'cares about', 'transparency']
self.generate_terms_report('policy-pacification_terms.csv',term_list)
# generate_policy_pacification_report
def generate_policy_pii_report(self):
"""
Determine percentage of all policy types
that contain PII terms.
"""
print('\t============================')
print('\t Generating PII Term Report ')
print('\t============================')
term_list = ['ip address','internet protocol address', 'browser type', 'operating system']
self.generate_terms_report('policy-pii_terms.csv',term_list)
# generate_policy_pii_report
def generate_terms_report(self,report_name,term_list):
"""
Generic function to generate reports on how
often terms appear in policies.
"""
# set up header row
csv_rows = []
header_row = ('Type','any term')
for term in term_list:
header_row = header_row + (term,)
csv_rows.append(header_row)
# get results for each policy_type
for policy_type in self.policy_types:
# makes reports clearer than 'None'
if policy_type['type'] == None:
this_policy_type = 'all'
else:
this_policy_type = policy_type['type']
print('\t\tProcessing %s...' % this_policy_type, end='', flush=True)
this_csv_row = (this_policy_type,)
this_csv_row = this_csv_row + (self.analyzer.get_terms_percentage(term_list,policy_type=policy_type['type'],policy_type_count=policy_type['count']),)
for term in term_list:
this_csv_row = this_csv_row + (self.analyzer.get_terms_percentage([term],policy_type=policy_type['type'],policy_type_count=policy_type['count']),)
csv_rows.append(this_csv_row)
print('done!')
self.utilities.write_csv(self.report_path,report_name,csv_rows)
# generate_terms_report
# Reporter
|
thezedwards/webXray
|
webxray/Reporter.py
|
Reporter.py
|
py
| 30,709 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "webxray.Utilities.Utilities",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "webxray.Analyzer.Analyzer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "webxray.SQLiteDriver.SQLiteDriver",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "webxray.PostgreSQLDriver.PostgreSQLDriver",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 384,
"usage_type": "call"
}
] |
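To show how the class above is typically driven, here is a hypothetical report run. The database name and limits are placeholders, and in practice the set of generate_* calls comes from webXray's own run script rather than being hand-written like this.

# Hypothetical driver for the Reporter defined above (argument values are examples only).
from webxray.Reporter import Reporter

reporter = Reporter(
    db_name="wbxr_demo_scan",   # database produced by an earlier crawl
    db_engine="sqlite",         # or "postgres"
    num_tlds=None,              # skip the per-TLD sub-reports
    num_results=100,            # row cap for the ranked CSVs
)
reporter.generate_db_summary_report()
reporter.generate_stats_report()
reporter.generate_3p_domain_report()
reporter.generate_3p_request_report(request_type="script")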
1741943302
|
import typing
from typing import Optional, Tuple, Any, Type, Dict
import numpy as np
from .mode import Q
from piquasso.core import _mixins
from piquasso.api.exceptions import PiquassoException, InvalidProgram
if typing.TYPE_CHECKING:
from piquasso.api.program import Program
class Instruction(_mixins.DictMixin, _mixins.RegisterMixin, _mixins.CodeMixin):
"""
Base class for all instructions.
Args:
params: Mapping of parameters specified by the users.
extra_params: Mapping of extra parameters, typically calculated ones.
"""
NUMBER_OF_MODES: Optional[int] = None
_subclasses: Dict[str, Type["Instruction"]] = {}
def __init__(
self, *, params: Optional[dict] = None, extra_params: Optional[dict] = None
) -> None:
self._params: dict = params or dict()
self._extra_params: dict = extra_params or dict()
@property
def params(self) -> dict:
return self._params
@property
def _all_params(self) -> dict:
return {**self._params, **self._extra_params}
@property
def modes(self) -> Tuple[int, ...]:
return getattr(self, "_modes", tuple())
@modes.setter
def modes(self, value: Tuple[int, ...]) -> None:
self._validate_modes(value)
self._modes = value
def _as_code(self) -> str:
if hasattr(self, "modes"):
mode_string = ", ".join([str(mode) for mode in self.modes])
else:
mode_string = ""
if hasattr(self, "params"):
params_string = "{}".format(
", ".join(
[
f"{key}={self._param_repr(value)}"
for key, value in self.params.items()
]
)
)
else:
params_string = ""
return f"pq.Q({mode_string}) | pq.{self.__class__.__name__}({params_string})"
@staticmethod
def _param_repr(value: Any) -> str:
if isinstance(value, np.ndarray):
return "np." + repr(value)
return value
def on_modes(self, *modes: int) -> "Instruction":
if modes is not tuple():
self.modes: Tuple[int, ...] = modes
return self
def _apply_to_program_on_register(self, program: "Program", register: Q) -> None:
program.instructions.append(self.on_modes(*register.modes))
@classmethod
def from_dict(cls, dict_: dict) -> "Instruction":
"""Creates an :class:`Instruction` instance from a dict specified.
Args:
dict_ (dict):
The desired :class:`Instruction` instance in the format of a `dict`.
Returns:
Instruction:
An :class:`Instruction` initialized using the specified `dict`.
"""
class_ = cls.get_subclass(dict_["type"])
instruction = class_(**dict_["attributes"]["constructor_kwargs"])
instruction.modes = dict_["attributes"]["modes"]
return instruction
@classmethod
def set_subclass(cls, instruction: Type["Instruction"]) -> None:
"""Registers a class in the instruction subclass map.
This is meaningful in contexts when one has multiple instructions with the same
name.
Example:
When one creates a custom beamsplitter with name `Beamsplitter` and
subclasses :class:`~piquasso.instructions.gates.Beamsplitter`, then for e.g.
executing a Blackbird code will be performed with the custom one, not the
original one. When one wants to use the original one in this case, one can
reset it with this method.
Args:
instruction (Type[Instruction]): The instruction class to be registered.
Raises:
PiquassoException:
When the class is not actually a subclass of :class:`Instruction`.
"""
if not issubclass(instruction, Instruction):
raise PiquassoException(
f"The instruction '{instruction}' needs to be a subclass of "
"'pq.Instruction'."
)
cls._subclasses[instruction.__name__] = instruction
@classmethod
def get_subclass(cls, name: str) -> Type["Instruction"]:
"""Returns the instruction subclass specified by its name.
Returns:
Type[Instruction]: The instruction class.
"""
return cls._subclasses[name]
def __repr__(self) -> str:
if hasattr(self, "modes"):
modes = "modes={}".format(self.modes)
else:
modes = ""
if getattr(self, "params") != {}:
params = "{}, ".format(
", ".join([f"{key}={value}" for key, value in self.params.items()])
)
else:
params = ""
classname = self.__class__.__name__
return f"<pq.{classname}({params}{modes})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, Instruction):
return False
return self.modes == other.modes and self.params == other.params
def __init_subclass__(cls) -> None:
super().__init_subclass__()
cls.set_subclass(cls)
def _validate_modes(self, modes):
if self.NUMBER_OF_MODES is not None and len(modes) != self.NUMBER_OF_MODES:
raise InvalidProgram(
f"The modes '{modes}' got specifed for the instruction '{self}', but "
f"exactly '{self.NUMBER_OF_MODES}' mode needs to be specified. "
f"Concretely, the total number of modes specified for this instruction "
f"is 'len(modes) == len({modes}) == {len(modes)} != "
f"{self.NUMBER_OF_MODES}'."
)
class Preparation(Instruction):
"""Base class for preparations."""
class Gate(Instruction):
"""Base class for gates."""
class Measurement(Instruction):
r"""Base class for measurements."""
|
Budapest-Quantum-Computing-Group/piquasso
|
piquasso/api/instruction.py
|
instruction.py
|
py
| 5,969 |
python
|
en
|
code
| 19 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins.DictMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "piquasso.core._mixins.RegisterMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "piquasso.core._mixins.CodeMixin",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "mode.Q",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "piquasso.api.exceptions.PiquassoException",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "piquasso.api.exceptions.InvalidProgram",
"line_number": 177,
"usage_type": "call"
}
] |
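As an illustration of the registry mechanics above (every subclass is registered under its class name by __init_subclass__, and from_dict looks it back up), here is a toy gate; Flip is invented for the example and is not part of piquasso.

# Toy subclass illustrating auto-registration and dict round-tripping (not a real piquasso gate).
from piquasso.api.instruction import Gate, Instruction

class Flip(Gate):
    NUMBER_OF_MODES = 1   # the modes setter enforces this via _validate_modes

assert Instruction.get_subclass("Flip") is Flip   # registered by __init_subclass__

instruction = Instruction.from_dict({
    "type": "Flip",
    "attributes": {"constructor_kwargs": {}, "modes": (0,)},
})
assert instruction.modes == (0,)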