seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38043006452
|
import cloudinary.uploader
import requests
# define your S3 bucket name here.
S3_BUCKET_NAME = "akshayranganath"
def get_file_name(url, transformation):
# transformation will be of the format "t_text_removed/jpg".
# remove the "/jpg" part and the "t_" part
transformation = transformation.rsplit('/',1)[0].split('t_',1)[1]
# from the URL, extract the file name. This will be of the format: 1000000010144_7GuardiansoftheTomb_portrait3x4.jpg
# For this file name, insert the transformation from above as the last component in the file name
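    # e.g. for the URL '.../s3_akshayranganath_1000000010144_7GuardiansoftheTomb_portrait3x4.jpg'
    # and transformation 't_text_removed/jpg', the result is
    # '1000000010144_7GuardiansoftheTomb_portrait3x4_text_removed.jpg'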
file_name = url.rsplit('/',1)[1].replace('.jpg','')
    # if the file name has the format s3_akshayranganath_, remove the prepended file name part
# by default, Cloudinary will create the file name like s3_akshayranganath_1000000010144_7GuardiansoftheTomb_portrait3x4
file_name = file_name.replace(f"s3_{S3_BUCKET_NAME}_","")
file_name = file_name + '_' + transformation + '.jpg'
print(file_name)
return file_name
def download_and_save(url, file_name):
# download the image and save it with the desired file name
resp = requests.get(url)
with open(file_name, 'wb') as w:
w.write(resp.content)
def delete_image(public_id):
# delete the image since transformation is now complete
resp = cloudinary.uploader.destroy(
public_id,
type='upload',
resource_type='image'
)
def main():
try:
        # upload the file. Create the necessary AI-based derivatives inline.
# no need to wait for any webhook notifications.
print("Uploading and transforming image ..")
resp = cloudinary.uploader.upload(
f's3://{S3_BUCKET_NAME}/1000000010144_7GuardiansoftheTomb_portrait3x4.jpeg',
upload_preset='ai_preset'
)
print("Done.")
# response will contain the URLs for the transformations.
# extract these URLs and download the images
for transform in resp['eager']:
tx = transform['transformation']
url = transform['secure_url']
file_name = get_file_name(url, tx)
download_and_save(url, file_name)
print("Transformations downloaded successfully")
        # optional - delete the file once the transformations have been downloaded
delete_image(resp['public_id'])
print(f"Image {resp['public_id']} deleted successfully.")
except Exception as e:
print(e)
if __name__=="__main__":
main()
|
akshay-ranganath/create-and-upload
|
demo_upload_and_download.py
|
demo_upload_and_download.py
|
py
| 2,558 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cloudinary.uploader.uploader.destroy",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cloudinary.uploader.uploader",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cloudinary.uploader",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "cloudinary.uploader.uploader.upload",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cloudinary.uploader.uploader",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "cloudinary.uploader",
"line_number": 42,
"usage_type": "name"
}
] |
32869975011
|
from fastapi import APIRouter
from api.schemes import relations, responses
from database import redis
def add_relation(rel: relations.Relation, rel_name: str) -> responses.RelationOperations:
if redis.add_relation(rel_name, rel.user_id, rel.item_id):
return responses.RelationOperations(status="successful")
return responses.RelationOperations(status="unsuccessful", action="relate")
def rem_relation(rel: relations.Relation, rel_name: str) -> responses.RelationOperations:
if redis.rem_relation(rel_name, rel.user_id, rel.item_id):
return responses.RelationOperations(status="successful")
return responses.RelationOperations(status="unsuccessful", action="unrelate")
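# Each router below pairs a POST endpoint that creates a relation with a DELETE
# endpoint that removes it; the relation kind is fixed by the rel_name argument.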
u2u_router = APIRouter(
prefix="/u2u",
tags=["User2User API"]
)
@u2u_router.post("")
def add_user(u2u: relations.User2User) -> responses.RelationOperations:
    return add_relation(rel=u2u, rel_name="u2u")
@u2u_router.delete("")
def rem_user(u2u: relations.User2User) -> responses.RelationOperations:
    return rem_relation(rel=u2u, rel_name="u2u")
u2p_router = APIRouter(
prefix="/u2p",
tags=["User2Post API"]
)
@u2p_router.post("")
def add_post(u2p: relations.User2Post) -> responses.RelationOperations:
    return add_relation(rel=u2p, rel_name="u2p")
@u2p_router.delete("")
def rem_post(u2p: relations.User2Post) -> responses.RelationOperations:
    return rem_relation(rel=u2p, rel_name="u2p")
u2c_router = APIRouter(
prefix="/u2c",
tags=["User2Comm API"]
)
@u2c_router.post("")
def add_comm(u2c: relations.User2Comm) -> responses.RelationOperations:
    return add_relation(rel=u2c, rel_name="u2c")
@u2c_router.delete("")
def rem_comm(u2c: relations.User2Comm) -> responses.RelationOperations:
    return rem_relation(rel=u2c, rel_name="u2c")
|
Muti-Kara/sylvest_recommender
|
api/routers/relations.py
|
relations.py
|
py
| 1,755 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "api.schemes.relations.Relation",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "database.redis.add_relation",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "database.redis",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "api.schemes.responses",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.schemes.responses",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "api.schemes.relations.Relation",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "database.redis.rem_relation",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "database.redis",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "api.schemes.responses",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "api.schemes.responses",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "api.schemes.relations.User2User",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "api.schemes.relations.User2User",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "api.schemes.relations.User2Post",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "api.schemes.relations.User2Post",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "api.schemes.relations.User2Comm",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "api.schemes.relations.User2Comm",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.relations",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "api.schemes.responses.RelationOperations",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "api.schemes.responses",
"line_number": 63,
"usage_type": "name"
}
] |
15306370520
|
""" File: eulerCharacteristics.py
Description: calculates the characteristics of the 2D Euler equation.
This includes the flux and the eigenvectors associated with it
Author: Pierre-Yves Taunay
Date: November 2018
"""
import numpy as np
from utils import P_from_Ev
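# ratio of specific heats (gamma); 1.4 corresponds to a diatomic ideal gas such as air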
GAM = 1.4
def compute_euler_flux(U,direction):
rho = U[:,0]
u = U[:,1] / rho
v = U[:,2] / rho
E = U[:,3] / rho
P = P_from_Ev(E,rho,u,v)
flx = np.zeros(U.shape)
if direction == 'dx':
flx[:,0] = rho*u
flx[:,1] = rho*u**2 + P
flx[:,2] = rho*u*v
flx[:,3] = rho*E*u + P*u
elif direction == 'dy':
flx[:,0] = rho*v
flx[:,1] = rho*u*v
flx[:,2] = rho*v**2 + P
flx[:,3] = rho*E*v + P*v
return flx
def eigenvector_x(u,v,a,q,h,nunk):
Rj = np.zeros((nunk,nunk))
Lj = np.zeros((nunk,nunk))
# Right eigenvector
Rj[0,:] = np.ones((1,nunk))
Rj[0,-1] = 0
Rj[1,0] = u - a
Rj[1,1] = u
Rj[1,2] = u + a
Rj[1,3] = 0
Rj[2,0:3] = v
Rj[2,3] = -1
Rj[3,0] = h - a * u
Rj[3,1] = q
Rj[3,2] = h + a * u
Rj[3,3] = -v
# Left eigenvector
Lj[0,0] = (GAM-1)*q + a*u
Lj[0,1] = (1-GAM)*u - a
Lj[0,2] = (1-GAM)*v
Lj[0,3] = (GAM-1)
Lj[0,:] /= (2*a**2)
Lj[1,0] = a**2 - (GAM-1)*q
Lj[1,1] = (GAM-1)*u
Lj[1,2] = (GAM-1)*v
Lj[1,3] = (1-GAM)
Lj[1,:] /= a**2
Lj[2,0] = (GAM-1)*q - a*u
Lj[2,1] = (1-GAM)*u + a
Lj[2,2] = (1-GAM)*v
Lj[2,3] = (GAM-1)
Lj[2,:] /= (2*a**2)
Lj[3,0] = v
Lj[3,2] = -1
return Rj, Lj
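# Note: Lj is constructed as the matrix inverse of Rj (the left eigenvectors of
# the x-direction flux Jacobian), so Lj @ Rj should equal the identity matrix.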
def eigenvector_y(u,v,a,q,h,nunk):
Rj = np.zeros((nunk,nunk))
Lj = np.zeros((nunk,nunk))
# Right eigenvector
Rj[0,:] = np.ones((1,nunk))
Rj[0,-1] = 0
Rj[1,0:3] = u
Rj[1,3] = 1
Rj[2,0] = v - a
Rj[2,1] = v
Rj[2,2] = v + a
Rj[2,3] = 0
Rj[3,0] = h - a * v
Rj[3,1] = q
Rj[3,2] = h + a * v
Rj[3,3] = u
# Left eigenvector
Lj[0,0] = (GAM-1)*q + a*v
Lj[0,1] = (1-GAM)*u
Lj[0,2] = (1-GAM)*v - a
Lj[0,3] = (GAM-1)
Lj[0,:] /= (2*a**2)
Lj[1,0] = a**2 - (GAM-1)*q
Lj[1,1] = (GAM-1)*u
Lj[1,2] = (GAM-1)*v
Lj[1,3] = (1-GAM)
Lj[1,:] /= a**2
Lj[2,0] = (GAM-1)*q - a*v
Lj[2,1] = (1-GAM)*u
Lj[2,2] = (1-GAM)*v + a
Lj[2,3] = (GAM-1)
Lj[2,:] /= (2*a**2)
Lj[3,0] = -u
Lj[3,1] = 1
return Rj, Lj
def compute_eigenvector(U,U0,direction):
rho = U[:,0]
u = U[:,1] / rho
v = U[:,2] / rho
E = U[:,3] / rho
P = P_from_Ev(E,rho,u,v)
nunk = U.shape[1]
nelem = U.shape[0]
a = np.sqrt(GAM*P/rho)
    q = 1/2*(u**2 + v**2) # Specific kinetic energy
    h = a**2/(GAM-1) + q # Total specific enthalpy
rho0 = U0[:,0]
u0 = U0[:,1] / rho0
v0 = U0[:,2] / rho0
E0 = U0[:,3] / rho0
P0 = P_from_Ev(E0,rho0,u0,v0)
nunk = U0.shape[1]
nelem = U0.shape[0]
a0 = np.sqrt(GAM*P0/rho0)
    q0 = 1/2*(u0**2 + v0**2) # Specific kinetic energy
    h0 = a0**2/(GAM-1) + q0 # Total specific enthalpy
Rjlist = []
Ljlist = []
if direction == 'dx':
Rlhs0, Llhs0 = eigenvector_x(u0[0],v0[0],a0[0],q0[0],h0[0],nunk)
for idx in range(nelem):
Rj, Lj = eigenvector_x(u[idx],v[idx],a[idx],q[idx],h[idx],nunk)
Rjlist.append(Rj)
Ljlist.append(Lj)
Rlhs0pre = None
Llhs0pre = None
elif direction == 'dy':
# For the y-direction, the bottom boundary can either be pre or post-shock
Rlhs0, Llhs0 = eigenvector_y(u0[0],v0[0],a0[0],q0[0],h0[0],nunk)
Rlhs0pre, Llhs0pre = eigenvector_y(u0[-1],v0[-1],a0[-1],q0[-1],h0[-1],nunk)
for idx in range(nelem):
Rj, Lj = eigenvector_y(u[idx],v[idx],a[idx],q[idx],h[idx],nunk)
Rjlist.append(Rj)
Ljlist.append(Lj)
Rj = Rjlist
Lj = Ljlist
return Rj,Lj,Rlhs0,Llhs0,Rlhs0pre,Llhs0pre
def to_characteristics(U,flx,U0,flx0,order,Lh,alpha,Nx,Ny,direction,options,tc,lambda_calc_char):
nelem = U.shape[0]
nunk = U.shape[1]
# Matrix holders
V = np.zeros((nelem,order+1,nunk))
VLF = np.zeros((nelem,order+1,nunk))
H = np.zeros((nelem,order+1,nunk))
# For all elements, evaluate R_{i+1/2}^-1 * [STENCIL]
    # The conditionals below work for r = 2 (order 5)
# We do the characteristics calculation for all elements for the whole stencil
V,H,VLF = lambda_calc_char(U,flx,U0,flx0,order,Lh,alpha,direction,tc)
return V,H,VLF
|
pytaunay/weno-tests
|
python/euler_2d/eulerCharacteristics.py
|
eulerCharacteristics.py
|
py
| 4,634 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "utils.P_from_Ev",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "utils.P_from_Ev",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "utils.P_from_Ev",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 195,
"usage_type": "call"
}
] |
12646866981
|
from django import forms
from .models import Reservation, Testimonial
class ReservationForm(forms.ModelForm):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
email = forms.EmailField(label="Your Email", widget=forms.EmailInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Email'
}
))
reservation_date = forms.DateTimeField(label="Date & Time",
widget=forms.DateTimeInput(
attrs={
'class': 'form-control datetimepicker-input',
'id': 'datetime',
'placeholder': 'Date & Time',
'data-target': '#date3',
'data-toggle': 'datetimepicker'
}
))
people = forms.IntegerField(label="No Of People", widget=forms.NumberInput(
attrs={
'class': 'form-control',
'id': 'people',
'placeholder': 'No Of People'
}
))
request = forms.CharField(label="Special Request", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Special Request',
'style': 'height: 100px;'
}
))
class Meta:
model = Reservation
fields = (
'name',
'email',
'reservation_date',
'people',
'request'
)
class ContactForm(forms.Form):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
email = forms.EmailField(label="Your Email", widget=forms.EmailInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Email'
}
))
subject = forms.CharField(label='Subject', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'subject',
'placeholder': 'Subject'
}
))
message = forms.CharField(label="Message", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Leave a message here',
'style': 'height: 150px;'
}
))
class FeedbackForm(forms.ModelForm):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
profession = forms.CharField(label="Your Profession", widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Profession'
}
))
feedback = forms.CharField(label="Feedback", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Feedback...',
'style': 'height: 150px;'
}
))
photo = forms.FileField(label='Photo', widget=forms.FileInput(
attrs={
'type': 'file',
'class': 'form-control',
'id': 'subject',
'placeholder': 'Photo'
}
))
class Meta:
model = Testimonial
fields = (
'name',
'profession',
'feedback',
'photo'
)
|
Dantes696/restaraunt
|
res/forms.py
|
forms.py
|
py
| 4,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms.EmailField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailInput",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms.DateTimeField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.forms.DateTimeInput",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.forms.NumberInput",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.forms.Textarea",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Reservation",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.forms.EmailField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailInput",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.forms.Textarea",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.forms.CharField",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "django.forms.Textarea",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.forms.FileField",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.forms.FileInput",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.Testimonial",
"line_number": 131,
"usage_type": "name"
}
] |
7873679939
|
import numpy as np
from multiprocessing import Pool
h, w = 1080, 1920
def draw_pixel():
pixel = np.zeros(24, dtype=np.uint8)
for i in range(24):
pixel[i] = np.random.randint(0, 2)
return pixel
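# Build one row for the stack of 24 binary images (seemingly the 24 bit planes
# of a pixel): the first column is random, and each later column is redrawn with
# probability p, otherwise copied from the previous column, which produces
# horizontally correlated random bits.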
def draw_row(p):
row = np.zeros((24, w), dtype=np.uint8)
row[:, 0] = draw_pixel()
for j in range(1, w):
if np.random.binomial(1, p):
row[:, j] = draw_pixel()
else:
row[:, j] = row[:, j-1]
return row
def draw(p, pool_size=4, chunk_size=10):
with Pool(pool_size) as pool:
rows = pool.map(draw_row, [p]*h, chunksize=chunk_size)
imgs = np.zeros((24, h, w), dtype=np.uint8)
for i, row in enumerate(rows):
imgs[:, i, :] = row
return imgs
def draw_single_process(p):
imgs = np.zeros((24, h, w), dtype=np.uint8)
for i in range(h):
imgs[:, i, :] = draw_row(p)
return imgs
|
e841018/ERLE
|
rand_img.py
|
rand_img.py
|
py
| 888 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.binomial",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 31,
"usage_type": "attribute"
}
] |
40124065659
|
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi
import certifi
from pprint import pprint
class database:
def __init__(self):
uri = "mongodb+srv://user:[email protected]/?retryWrites=true&w=majority"
# Create a new client and connect to the server
self.client = MongoClient(uri, tlsCAFile=certifi.where())
# Send a ping to confirm a successful connection
try:
self.client.admin.command('ping')
print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
print(e)
self.db = self.client['Harvest-Hero']
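# Example usage (hypothetical collection name):
#   db = database()
#   users = db.db['users']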
|
SteveHuy/Harvest-Hero
|
Database+APIs/database.py
|
database.py
|
py
| 721 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymongo.mongo_client.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "certifi.where",
"line_number": 11,
"usage_type": "call"
}
] |
25097354504
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 13:56:29 2022
@author: maria
"""
import numpy as np
import pandas as pd
from numpy import zeros, newaxis
import matplotlib.pyplot as plt
import scipy as sp
from scipy.signal import butter,filtfilt,medfilt
import csv
import re
import functions2022_07_15 as fun
#getting the signal, for now using the raw F
animal= 'Hedes'
date= '2022-07-19'
#note: if experiment type not known, put 'suite2p' instead
experiment = '1'
#the file number of the NiDaq file, not always experiment-1 because there might have been an issue with a previous acquisition etc
file_number = '0'
log_number = '0'
plane_number = '1'
#IMPORTANT: SPECIFY THE FRAME RATE
frame_rate = 15
#the total amount of seconds to plot
seconds = 5
#specify the cell for single cell plotting
res = ''
filePathF ='D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//F.npy'
filePathops = 'D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//ops.npy'
filePathmeta = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//NiDaqInput'+file_number+'.bin'
filePathlog = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//Log'+log_number+'.csv'
filePathArduino = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//ArduinoInput'+file_number+'.csv'
signal= np.load(filePathF, allow_pickle=True)
filePathiscell = 'D://Suite2Pprocessedfiles//'+animal+ '//'+date+ '//'+res+'suite2p//plane'+plane_number+'//iscell.npy'
iscell = np.load(filePathiscell, allow_pickle=True)
#loading ops file to get length of first experiment
ops = np.load(filePathops, allow_pickle=True)
ops = ops.item()
#printing data path to know which data was analysed
key_list = list(ops.values())
print(key_list[88])
print("frames per folder:",ops["frames_per_folder"])
exp= np.array(ops["frames_per_folder"])
#getting the first experiment, this is the length of the experiment in frames
exp1 = int(exp[0])
#getting second experiment
exp2 = int(exp[1])
#getting experiment 3
if exp.shape[0] == 3:
exp3 = int(exp[2])
"""
Step 1: getting the cell traces I need, here the traces for the first experiment
"""
#getting the F trace of real cells (excluding ROIs not classified as cells) using a function I wrote
signal_cells = fun.getcells(filePathF= filePathF, filePathiscell= filePathiscell).T
#%%
#
#getting the fluorescence for the first experiment
first_exp_F = signal_cells[:, 0:exp1]
# to practice will work with one cell for now from one experiment
cell = 33
F_onecell = signal[cell, 0:exp1]
# fig,ax = plt.subplots()
# plt.plot(F_onecell)
"""
Step 2: getting the times of the stimuli
"""
#getting metadata info, remember to choose the right number of channels!! for most recent data it's 5 (for data in March after the 16th it's 4, and 7 before that)
meta = fun.GetMetadataChannels(filePathmeta, numChannels=5)
#getting the photodiode info, usually the first column in the meta array
photodiode = meta[:,0]
#using the function from above to get the times of the photodiode changes (in milliseconds!)
photodiode_change = fun.DetectPhotodiodeChanges(photodiode,plot= True,lowPass=30,kernel = 101,fs=1000, waitTime=10000)
#the above holds every photodiode change indiscriminately; every second change (the even-numbered ones) is a stimulus onset
stim_on = photodiode_change[1::2]
# fig,ax = plt.subplots()
# ax.plot(stim_on)
"""
Step 3: actually aligning the stimuli with the traces (using Liad's function)
"""
tmeta= meta.T
frame_clock = tmeta[1]
frame_times = fun.AssignFrameTime(frame_clock, plot = False)
# frame_times1 = frame_times[1:]
frame_on = frame_times[::2]
frames_plane1 = frame_on[1::4]
frames_plane2 = frame_on[2::4]
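#NOTE (assumption): the frame clock appears to interleave four imaging planes, so
#every 4th frame-on time, at a different offset, recovers the frames of one plane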
#window: specify the range of the window
window= np.array([-1000, 4000]).reshape(1,-1)
aligned_all = fun.AlignStim(signal= signal_cells, time= frames_plane1, eventTimes= stim_on, window= window,timeLimit=1000)
#aligned: thetraces for all the stimuli for all the cells
aligned = aligned_all[0]
#the actual time, usually 1 second before and 4 seconds after stim onset in miliseconds
time = aligned_all[1]
#%%
"""
Step 4: getting the identity of the stimuli
"""
#need to get the log info file extraction to work
#getting stimulus identity
Log_list = fun.GetStimulusInfo (filePathlog, props = ["LightOn"])
#converting the list of dictionaries into an array and adding the time of the stimulus
#worked easiest by using pandas dataframe
log = np.array(pd.DataFrame(Log_list).values).astype(np.float64)
#log[0] is the degrees, log[1] would be spatial freq etc (depending on the order in the log list)
#no of stimuli specifes the total amount of stim shown
nr_stimuli = aligned.shape[1]
#%%
#getting one neuron for testing and plotting of a random stimulus:
neuron = 3
one_neuron = aligned[:,:,neuron]
fig,ax = plt.subplots()
ax.plot(time,one_neuron[:,])
ax.axvline(x=0, c="red", linestyle="dashed", linewidth = 1)
|
mariacozan/Analysis_and_Processing
|
code_archive/2022-07-21-neuronal_classification.py
|
2022-07-21-neuronal_classification.py
|
py
| 5,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.getcells",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.GetMetadataChannels",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.DetectPhotodiodeChanges",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.AssignFrameTime",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.AlignStim",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "functions2022_07_15.GetStimulusInfo",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 146,
"usage_type": "name"
}
] |
74182080829
|
#!/usr/bin/env python
from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import json
import argparse
import time
import random
import uuid
ALL_POLICY = '''{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt''' + str(random.randint(100000, 999999)) +'''",
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
'''
def main(args):
if args.user_name:
attach_policy(args.user_name, 'UserName')
put_policy(args.user_name, 'UserName')
elif args.role_name:
attach_policy(args.role_name, 'RoleName')
put_policy(args.role_name, 'RoleName')
elif args.group_name:
attach_policy(args.group_name, 'GroupName')
put_policy(args.group_name, 'GroupName')
else:
print('No user, role, or group specified. Quitting.')
def attach_policy(principal, principal_name):
result = False
client = boto3.client('iam')
attach_policy_funcs = {
'UserName': client.attach_user_policy,
'RoleName': client.attach_role_policy,
'GroupName': client.attach_group_policy
}
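    # dispatch on the principal type; each boto3 attach_*_policy call takes the
    # principal name (UserName/RoleName/GroupName) as a keyword argument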
attach_policy_func = attach_policy_funcs[principal_name]
try:
response = attach_policy_func(**{
principal_name: principal,
'PolicyArn': 'arn:aws:iam::aws:policy/AdministratorAccess'
}
)
result = True
print('AdministratorAccess policy attached successfully to ' + principal)
except ClientError as e:
print(e.response['Error']['Message'])
return result
def put_policy(principal, principal_name):
result = False
client = boto3.client('iam')
put_policy_funcs = {
'UserName': client.put_user_policy,
'RoleName': client.put_role_policy,
'GroupName': client.put_group_policy
}
put_policy_func = put_policy_funcs[principal_name]
try:
response = put_policy_func(**{
principal_name: principal,
'PolicyName': str(uuid.uuid4()),
'PolicyDocument': ALL_POLICY
}
)
result = True
print('All action policy attached successfully to ' + principal)
except ClientError as e:
print(e.response['Error']['Message'])
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Attempts to add an admin and all actions policy to the given role, user, or group.")
parser.add_argument('-u',
'--user-name')
parser.add_argument('-r',
'--role-name')
parser.add_argument('-g',
'--group-name')
args = parser.parse_args()
main(args)
|
dagrz/aws_pwn
|
elevation/add_iam_policy.py
|
add_iam_policy.py
|
py
| 2,724 |
python
|
en
|
code
| 1,106 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 86,
"usage_type": "call"
}
] |
27673024131
|
import torch
from torch import nn
def init_weights_(m: nn.Module,
val: float = 3e-3):
if isinstance(m, nn.Linear):
m.weight.data.uniform_(-val, val)
m.bias.data.uniform_(-val, val)
class Actor(nn.Module):
def __init__(self,
state_dim: int,
action_dim: int,
max_action: float = None,
dropout: float = None,
hidden_dim: int = 256,
uniform_initialization: bool = False) -> None:
super().__init__()
if dropout is None:
dropout = 0
self.max_action = max_action
self.actor = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Dropout(dropout),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.Dropout(dropout),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim)
)
def forward(self, state: torch.Tensor) -> torch.Tensor:
action = self.actor(state)
if self.max_action is not None:
return self.max_action * torch.tanh(action)
return action
class Critic(nn.Module):
def __init__(self,
state_dim: int,
action_dim: int,
hidden_dim: int = 256,
uniform_initialization: bool = False) -> None:
super().__init__()
self.q1_ = nn.Sequential(
nn.Linear(state_dim + action_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
self.q2_ = nn.Sequential(
nn.Linear(state_dim + action_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self,
state: torch.Tensor,
action: torch.Tensor):
concat = torch.cat([state, action], 1)
return self.q1_(concat), self.q2_(concat)
def q1(self,
state: torch.Tensor,
action: torch.Tensor) -> torch.Tensor:
return self.q1_(torch.cat([state, action], 1))
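# Minimal usage sketch (hypothetical dimensions):
#   actor = Actor(state_dim=17, action_dim=6, max_action=1.0)
#   critic = Critic(state_dim=17, action_dim=6)
#   action = actor(torch.randn(32, 17))
#   q1, q2 = critic(torch.randn(32, 17), action)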
|
zzmtsvv/rl_task
|
spot/modules.py
|
modules.py
|
py
| 2,231 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.tanh",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 79,
"usage_type": "call"
}
] |
9651796880
|
import utils
from utils import *
# Arguments available
def parse_args():
parser = argparse.ArgumentParser(description='Task1')
parser.add_argument('--image_path', type=str, default=None,
help='Path to an image on which to apply Task1 (absolute or relative path)')
parser.add_argument('--save_path', type=str, default=None,
help='Path where to save the output of the algorithm (absolute or relative path)')
parser.add_argument('--path_dir', type=str, default="./dataset/Task1/",
help='Path to the directory where we want to apply Task1 (absolute or relative path)')
parser.add_argument('--save_dir', type=str, default='./dataset/predictions/Task1/',
help='Path where to save the directory where we want to apply Task1 (absolute or relative path)')
parser.add_argument('--no_file', type=str, default=None,
help='Apply the algorithm on the image specified by this number, that is located on path_dir. The output is saved on save_dir location')
parser.add_argument('--verbose', type=str, default='0',
help='Print intermediate output from the algorithm. Choose 0/1')
args = parser.parse_args()
return args
# Computes the logic behind task1:
# - first applies get_map to remove the scoring table and non-relevant ice surfaces.
# - then finds and filters circles extracted with the HoughCircles algorithm.
# Finally, it saves the result to the specified file.
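# The saved output file contains three lines: the counts of all circles, red circles and yellow circles.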
def task1(image_path, save_path=None, verbose=0):
image = cv2.imread(image_path)
image = get_map(image=image, verbose=verbose)
image_all_circles, image_filtered_circles, circles_dict = get_hough_circles(image=image, min_radius=10, max_radius=25, minDist=30, dp=1, param1=150, param2=15,verbose=verbose)
if verbose:
utils.show_images([image, image_all_circles, image_filtered_circles], nrows=2, ncols=2)
    string_to_write_in_file = "\n".join([str(len(circles_dict["all_circles"])), str(len(circles_dict["red_circles"])), str(len(circles_dict["yellow_circles"]))])
if save_path != None and save_path != "":
with open(save_path, "w+") as f:
f.write(string_to_write_in_file)
print("The output was saved at location: {}!".format(save_path))
print(string_to_write_in_file)
#image_path = save_path.replace(".txt", ".png")
#cv2.imwrite(image_path, image_filtered_circles)
return circles_dict
if __name__ == "__main__":
args = parse_args()
verbose = ord(args.verbose) - ord('0')
if args.image_path != None:
try:
task1(image_path=args.image_path,
save_path=args.save_path,
verbose=verbose)
        except Exception as e:
            raise Exception("An exception occurred during the execution of Task1!") from e
else:
os.makedirs(args.save_dir, exist_ok=True)
if args.no_file != None:
try:
image_path = os.path.join(args.path_dir, "{}.png".format(args.no_file))
save_path = os.path.join(args.save_dir, "{}_predicted.txt".format(args.no_file))
print("Processing the image located at: {}".format(image_path))
task1(image_path=image_path,
verbose=verbose,
save_path=save_path)
            except Exception as e:
                raise Exception("An exception occurred during the execution of Task1 for the image located at: {}!".format(image_path)) from e
else:
for no_file in range(1, 26):
try:
image_path = os.path.join(args.path_dir, "{}.png".format(no_file))
save_path = os.path.join(args.save_dir, "{}_predicted.txt".format(no_file))
print("Processing the image located at: {}".format(image_path))
task1(image_path=image_path,
verbose=verbose,
save_path=save_path)
                except Exception as e:
                    raise Exception("An exception occurred during the execution of Task1 for the image located at: {}!".format(image_path)) from e
|
SebastianCojocariu/Curling-OpenCV
|
task_1.py
|
task_1.py
|
py
| 3,759 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "utils.show_images",
"line_number": 32,
"usage_type": "call"
}
] |
29788980815
|
from collections import Counter
import numpy as np
import pandas as pd
import pickle
from sklearn import svm, model_selection, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from sklearn.model_selection import cross_validate, train_test_split
# processing data for Machine Learning
# groups of companies are likely to move together; some are going to move first
# pricing data is converted to % change - these are our features; the labels will be the target (buy, sell or hold)
# ask a question of the data based on the price changes: within 7 days did the price go up or not (buy if yes, sell if no)
# each model is going to be built on a per-company basis
def process_data_for_labels(ticker):
# next 7 days if price goes up or down
hm_days = 7
df = pd.read_csv('sp500_joined_closes.csv', index_col = 0)
tickers = df.columns.values.tolist()
df.fillna(0, inplace = True)
for i in range(1, hm_days+1):
# price in 2 days from now - todays price / todays price * 100
df['{}_{}d'.format(ticker, i)] = (df[ticker].shift(-i) - df[ticker]) / df[ticker] # (shift (-i) to move future prices up in table)
df.fillna(0, inplace = True)
return tickers, df
# function to detect buy,sell or hold stocks
def buy_sell_hold(*args):
cols = [c for c in args]
requirement = 0.02
for col in cols:
if col > requirement: # buy
return 1
if col < -requirement: # sell
return -1
return 0 # hold
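# Example: with requirement = 0.02, the first daily change above +2% returns 1 (buy),
# the first below -2% returns -1 (sell); if no change exceeds +/-2%, it returns 0 (hold).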
def extract_featuresets(ticker):
tickers, df = process_data_for_labels(ticker)
# creating maps of either buy, sell or hold for 7 days
df['{}_target'.format(ticker)] = list(map( buy_sell_hold,
df['{}_1d'.format(ticker)],
df['{}_2d'.format(ticker)],
df['{}_3d'.format(ticker)],
df['{}_4d'.format(ticker)],
df['{}_5d'.format(ticker)],
df['{}_6d'.format(ticker)],
df['{}_7d'.format(ticker)],
))
# values are assigned to a list
vals = df['{}_target'.format(ticker)].values.tolist()
str_vals = [str(i) for i in vals]
# Data spread to see the spreads in value and filling spreads in list
print ('Data spread: ', Counter(str_vals))
df.fillna(0, inplace=True)
# replaces any infinite increase since it may be an IPO to a NaN
df = df.replace([np.inf,-np.inf], np.nan)
# dropping NaN
df.dropna(inplace=True)
# values are normalised in % change from yesterday
df_vals = df[[ticker for ticker in tickers ]].pct_change()
df_vals = df_vals.replace([np.inf,-np.inf], 0)
df_vals.fillna(0, inplace=True)
# x feature sets, y are labels
X = df_vals.values
y = df['{}_target'.format(ticker)].values
return X,y, df
def do_ml(ticker):
    # where X holds our feature sets and y the labels from buy_sell_hold(): either 0, 1 or -1
X, y, df = extract_featuresets(ticker)
# training x and y using train_test_split with test_size of 25%
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.25)
# creating a classifier
# clf = neighbors.KNeighborsClassifier()
clf = VotingClassifier([('lsvc',svm.LinearSVC()), ('knn', neighbors.KNeighborsClassifier()),
('rfor', RandomForestClassifier(n_estimators=100))])
# fit x and y train into classifier
clf.fit(X_train, y_train)
# to know confidence of the data
confidence = clf.score(X_test, y_test)
print('Accuracy: ', confidence)
# predictions predicts x_test(futuresets)
predictions = clf.predict(X_test)
print('Predicted spread:', Counter(predictions))
return confidence
do_ml('TWTR')
|
mihir13/python_for_finance
|
PythonForFinance9.py
|
PythonForFinance9.py
|
py
| 3,409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.VotingClassifier",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 104,
"usage_type": "call"
}
] |
2542812722
|
import os
import json
import logging
from infy_bordered_table_extractor import bordered_table_extractor
from infy_bordered_table_extractor.bordered_table_extractor import OutputFileFormat
from infy_bordered_table_extractor.providers.tesseract_data_service_provider import TesseractDataServiceProvider
from infy_bordered_table_extractor.bordered_table_extractor import LineDetectionMethod
def __create_new_instance():
if not os.path.exists("./logs"):
os.makedirs("./logs")
logging.basicConfig(
filename=("./logs" + "/app_log.log"),
format="%(asctime)s- %(levelname)s- %(message)s",
level=logging.INFO,
datefmt="%d-%b-%y %H:%M:%S",
)
logger = logging.getLogger()
TESSERACT_PATH = os.environ['TESSERACT_PATH']
provider = TesseractDataServiceProvider(TESSERACT_PATH)
# input files path
temp_folderpath = './data/temp'
img_filepath = os.path.abspath(
'./data/sample_1.png')
table_object = bordered_table_extractor.BorderedTableExtractor(
provider, provider, temp_folderpath, logger, True)
return table_object, img_filepath
def test_bordered_table_extractor_bbox_RGBLineDetect():
"""test method"""
table_object, img_filepath = __create_new_instance()
save_folder_path = os.path.abspath('./data/output')
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937], config_param_dict={
'output': {'path': save_folder_path,
'format': [OutputFileFormat.EXCEL]}
}
)
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 5,
'col_count': [4, 4, 4, 4, 4]
}
def test_bordered_table_extractor_bbox_OpenCVLineDetect():
"""test method"""
table_object, img_filepath = __create_new_instance()
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937],
config_param_dict={'line_detection_method': [
LineDetectionMethod.OPENCV_LINE_DETECT]})
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 5,
'col_count': [4, 4, 4, 4, 4]
}
def test_bordered_table_extractor_with_custom_cells():
"""test method"""
table_object, img_filepath = __create_new_instance()
result = table_object.extract_all_fields(
img_filepath, within_bbox=[73, 2001, 4009, 937],
config_param_dict={
'custom_cells': [
{'rows': [1], 'columns':[1]}, {'rows': [2], 'columns':[2]}]
}
)
__pretty_print(result)
assert result['error'] is None
assert __get_summary(result) == {
'table_count': 1,
'row_count': 2,
'col_count': [3, 3]
}
def __get_summary(api_result):
row_count = -1
col_counts = []
for table in api_result['fields']:
rows = table['table_value']
row_count = len(rows)
for row in rows:
col_counts.append(len(row))
return {
'table_count': len(api_result['fields']),
'row_count': row_count,
'col_count': col_counts
}
def __pretty_print(dictionary):
p = json.dumps(dictionary, indent=4)
print(p.replace('\"', '\''))
|
Infosys/Document-Extraction-Libraries
|
infy_bordered_table_extractor/tests/test_border_table_img.py
|
test_border_table_img.py
|
py
| 3,357 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "infy_bordered_table_extractor.providers.tesseract_data_service_provider.TesseractDataServiceProvider",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor.BorderedTableExtractor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor.OutputFileFormat.EXCEL",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor.OutputFileFormat",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor.LineDetectionMethod.OPENCV_LINE_DETECT",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "infy_bordered_table_extractor.bordered_table_extractor.LineDetectionMethod",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 105,
"usage_type": "call"
}
] |
23088053555
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code by: Magnus Øye, Dated: 12.11-2018
Contact: [email protected]
Website: https://github.com/magnusoy/Balancing-Platform
"""
# Importing packages
import numpy as np
from numpy import sqrt, sin, cos, pi, arccos
import matplotlib.pylab as plt
# Plot style
plt.style.use("bmh")
# Constants
L = 45 # Length of one side
Z0 = 8.0 # Start lifting height
A = 4.0 # Center offset
r = 9.0 # Radius
countsPerRev = 400000 # Motor counts per revolution
pitch = 0 # Movement in Y-axis
roll = 0 # Movement in X-axis
anglesPitch = np.linspace(-0.139626, 0.139626, num=50) # Array of linearly spaced angles from -8 to 8 degrees
anglesRoll = np.linspace(-0.139626, 0.139626, num=50) # Array of linearly spaced angles from -8 to 8 degrees
# Lists for holding simulation data
X = []
Y1 = []
Y2 = []
Y3 = []
# Simulating platform movements
for angle in anglesPitch:
deg = angle * 180 / pi
pitch = angle
roll = 0
# Motor lift height
z1 = ((sqrt(3) * L) / 6) * sin(pitch) * cos(roll) + ((L/2)*sin(roll)) + Z0
z2 = ((sqrt(3) * L) / 6) * sin(pitch) * cos(roll) - ((L/2)*sin(roll)) + Z0
z3 = -((sqrt(3) * L) / 3) * sin(pitch) * cos(roll) + Z0
# Motor angles in radians
angleM1 = arccos(((z1**2) + (A**2) - (r**2)) / (2.0 * A * z1))
angleM2 = arccos(((z2**2) + (A**2) - (r**2)) / (2.0 * A * z2))
angleM3 = arccos(((z3**2) + (A**2) - (r**2)) / (2.0 * A * z3))
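    # The angles above follow from the law of cosines on the triangle with sides
    # A (center offset), r (radius) and z (lift height); this interpretation is
    # inferred from the constants defined at the top of the file.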
# Motor angles in degrees
degreeM1 = (angleM1 * 180.0) / pi
degreeM2 = (angleM2 * 180.0) / pi
degreeM3 = (angleM3 * 180.0) / pi
    # Motor position in counts (radians converted at countsPerRev counts per 2*pi radians)
    outM1 = angleM1 * (countsPerRev / (2 * pi))
    outM2 = angleM2 * (countsPerRev / (2 * pi))
    outM3 = angleM3 * (countsPerRev / (2 * pi))
# Adding values in array for visual representation
X.append(deg)
Y1.append(z1)
Y2.append(z2)
Y3.append(z3)
# Plotting values
fig, axes = plt.subplots(1, 3, constrained_layout=True)
fig.suptitle('Pitch +/- 8 degrees | Roll +/- 0 degrees', size=16)
ax_m1 = axes[0]
ax_m2 = axes[1]
ax_m3 = axes[2]
ax_m1.set_title('Motor 1 lift height')
ax_m2.set_title('Motor 2 lift height')
ax_m3.set_title('Motor 3 lift height')
ax_m1.set_xlabel('Rotation [degrees]')
ax_m2.set_xlabel('Rotation [degrees]')
ax_m3.set_xlabel('Rotation [degrees]')
ax_m1.set_ylabel('Height [cm]')
ax_m2.set_ylabel('Height [cm]')
ax_m3.set_ylabel('Height [cm]')
ax_m1.set_xlim(-8, 8)
ax_m2.set_xlim(-8, 8)
ax_m3.set_xlim(-8, 8)
ax_m1.set_ylim(0, 15)
ax_m2.set_ylim(0, 15)
ax_m3.set_ylim(0, 15)
ax_m1.plot(X, Y1, label='M1')
ax_m2.plot(X, Y2, label='M2')
ax_m3.plot(X, Y3, label='M3')
ax_m1.legend()
ax_m2.legend()
ax_m3.legend()
# Showing values
plt.show()
|
magnusoy/Balancing-Platform
|
src/balancing_platform/util/graphs.py
|
graphs.py
|
py
| 2,702 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pylab.style.use",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.style",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pylab",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplots",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 98,
"usage_type": "name"
}
] |
30272112886
|
from .views import *
from django.urls import path
urlpatterns = [
path('', home, name='home'),
path('login/', login_user, name='login'),
path('contact/', contact, name='contact'),
path('api/<str:userid>/', api, name='api'),
path('logout/', logout_user, name='logout'),
path('register/', register, name='register'),
path('server-maintenance/', freeze, name='freeze'),
path('exam-status/<str:user>/', exam_end, name='exam_end'),
path('exam-credential/', exam_authentication, name='exam_auth'),
path('exam-credential/auth-user/exam/<str:userid>/', exam, name='exam'),
path('activate/<uidb64>/<token>/<details>/', user_verification, name='activate')
]
|
supratim531/hetc-web
|
scholarship/urls.py
|
urls.py
|
py
| 693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
}
] |
20921293526
|
import numpy as np
import matplotlib.pyplot as plt
def plot_results(results, range_param, label='', color='r', marker='o'):
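    # plot the mean across runs with a shaded band of +/- one standard deviation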
mean_results = np.mean(results, axis=1)
min_results = np.mean(results, axis=1) - np.std(results, axis=1)
max_results = np.mean(results, axis=1) + np.std(results, axis=1)
plt.plot(range_param, mean_results, marker=marker, color=color, label=label)
plt.fill_between(range_param, min_results, max_results, facecolor=color, interpolate=True, alpha=.2)
def save_results(results, range_param, directory, file_name):
file = open(directory + file_name, "w")
for i in range_param:
file.write(str(i) + " ")
file.write("\n")
for result_list in results:
for result in result_list:
file.write(str(result) + " ")
        file.write("\n")
file.close()
def load_results(directory, file_name):
    range_param = []
    results = []
    # use a context manager so the file is closed, and materialize the map
    # iterators into lists (in Python 3, map returns a lazy iterator)
    with open(directory + file_name, "r") as file:
        for i, line in enumerate(file):
            if i == 0:
                range_param = list(map(float, line.split()))
            else:
                results.append(list(map(float, line.split())))
    return range_param, results
def save_clusters(G, clusters, label, directory, file_name):
clusters_sorted = sorted(clusters, key=len, reverse=True)
file = open(directory + file_name, "w")
for i, c in enumerate(clusters_sorted):
c_sorted = sorted(c, key=G.degree, reverse=True)
file.write("\n\nCluster " + str(i) + " (" + str(len(c)) +" nodes)")
for u in c_sorted:
file.write("\n" + str(u) + ": " + label[u])
file.close()
|
sharpenb/Multi-Scale-Modularity-Graph-Clustering
|
Scripts/experiments/results_manager.py
|
results_manager.py
|
py
| 1,613 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.mean",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
}
] |
71588771707
|
import pytest
from pytest import approx
from brownie import chain
from brownie.test import given, strategy
from decimal import Decimal
from .utils import RiskParameter, transform_snapshot
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
pass
@given(
initial_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
peek_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volume_bid(state, market, feed, initial_fraction,
peek_fraction, dt, ovl, bob):
# have bob initially build a short to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral = initial_fraction * cap_notional
input_leverage = 1000000000000000000
input_is_long = False
input_price_limit = 0
# approve max for bob
ovl.approve(market, 2**256-1, {"from": bob})
# build position for bob
market.build(input_collateral, input_leverage, input_is_long,
input_price_limit, {"from": bob})
# mine the chain forward
chain.mine(timedelta=dt)
fraction = int(peek_fraction * Decimal(1e18))
snap = market.snapshotVolumeBid()
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the volume bid should be given snapshot value
timestamp = chain[-1]['timestamp']
window = micro_window
value = fraction
snap = transform_snapshot(snap, timestamp, window, value)
(_, _, accumulator) = snap
expect = int(accumulator)
actual = int(state.volumeBid(market, fraction))
assert expect == approx(actual)
@given(
initial_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
peek_fraction=strategy('decimal', min_value='0.001', max_value='0.500',
places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volume_ask(state, market, feed, initial_fraction,
peek_fraction, dt, ovl, alice):
# have alice initially build a long to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral = initial_fraction * cap_notional
input_leverage = 1000000000000000000
input_is_long = True
input_price_limit = 2**256 - 1
# approve max for alice
ovl.approve(market, 2**256-1, {"from": alice})
# build position for alice
market.build(input_collateral, input_leverage, input_is_long,
input_price_limit, {"from": alice})
# mine the chain forward
chain.mine(timedelta=dt)
fraction = int(peek_fraction * Decimal(1e18))
snap = market.snapshotVolumeAsk()
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the volume ask should be given snapshot value
timestamp = chain[-1]['timestamp']
window = micro_window
value = fraction
snap = transform_snapshot(snap, timestamp, window, value)
(_, _, accumulator) = snap
expect = int(accumulator)
actual = int(state.volumeAsk(market, fraction))
assert expect == approx(actual)
@given(
initial_fraction_alice=strategy('decimal', min_value='0.001',
max_value='0.500', places=3),
initial_fraction_bob=strategy('decimal', min_value='0.001',
max_value='0.500', places=3),
dt=strategy('uint256', min_value='10', max_value='600'))
def test_volumes(state, market, feed, ovl, alice, bob,
initial_fraction_alice, initial_fraction_bob,
dt):
# have alice and bob initially build a long and short to init volume
cap_notional = market.params(RiskParameter.CAP_NOTIONAL.value)
input_collateral_alice = initial_fraction_alice * cap_notional
input_leverage_alice = 1000000000000000000
input_is_long_alice = True
input_price_limit_alice = 2**256 - 1
input_collateral_bob = initial_fraction_bob * cap_notional
input_leverage_bob = 1000000000000000000
input_is_long_bob = False
input_price_limit_bob = 0
# approve max for alice and bob
ovl.approve(market, 2**256-1, {"from": alice})
ovl.approve(market, 2**256-1, {"from": bob})
# build positions for alice and bob
market.build(input_collateral_alice, input_leverage_alice,
input_is_long_alice, input_price_limit_alice, {"from": alice})
market.build(input_collateral_bob, input_leverage_bob, input_is_long_bob,
input_price_limit_bob, {"from": bob})
# mine the chain forward
chain.mine(timedelta=dt)
data = feed.latest()
(_, micro_window, _, _, _, _, _, _) = data
# calculate what the bid should be given snapshot value
snap_bid = market.snapshotVolumeBid()
timestamp_bid = chain[-1]['timestamp']
window_bid = micro_window
snap_bid = transform_snapshot(snap_bid, timestamp_bid, window_bid, 0)
(_, _, accumulator_bid) = snap_bid
# calculate what the ask should be given snapshot value
snap_ask = market.snapshotVolumeAsk()
timestamp_ask = chain[-1]['timestamp']
window_ask = micro_window
snap_ask = transform_snapshot(snap_ask, timestamp_ask, window_ask, 0)
(_, _, accumulator_ask) = snap_ask
expect_volume_bid = int(accumulator_bid)
expect_volume_ask = int(accumulator_ask)
(actual_volume_bid, actual_volume_ask) = state.volumes(market)
assert expect_volume_bid == approx(int(actual_volume_bid))
assert expect_volume_ask == approx(int(actual_volume_ask))
|
overlay-market/v1-periphery
|
tests/state/test_volume.py
|
test_volume.py
|
py
| 5,680 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "pytest.fixture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.RiskParameter.CAP_NOTIONAL",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "utils.RiskParameter",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "brownie.chain.mine",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "utils.transform_snapshot",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "brownie.test.given",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.RiskParameter.CAP_NOTIONAL",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "utils.RiskParameter",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "brownie.chain.mine",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "utils.transform_snapshot",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "brownie.test.given",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "utils.RiskParameter.CAP_NOTIONAL",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "utils.RiskParameter",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "brownie.chain.mine",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "brownie.chain",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "utils.transform_snapshot",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "brownie.chain",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "utils.transform_snapshot",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "brownie.test.given",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "brownie.test.strategy",
"line_number": 106,
"usage_type": "call"
}
] |
1306545281
|
import os, datetime
def call_msra():
    terr = input('territory code: ')
    if terr == "":
        print()
        call_msra()
        return
    comp = input('workstation number: ')
    if comp == "":
        print()
        call_msra()
    else:
        os.system(r'C:\Windows\System32\msra.exe /offerra kmr-' + terr + '-' + comp)
        try:
            logging(1)
        except BaseException:
            print("Error while writing to log.txt")
        print()
        call_msra()
# read log.txt and update in it the counter of opened workstations
# cnt - number of entries added to the log
def logging(cnt):
today = str(datetime.date.today())
    # check that the file exists; if not, it is created
try:
outputFile = open("log.txt", "r")
except FileNotFoundError:
outputFile = open("log.txt", "w+")
print("", file=outputFile)
    # read the file's lines into lines
lines = []
for line in outputFile:
if line.rstrip() != "":
lines.append(line.rstrip())
outputFile.close()
    # check that the file is not empty and that the header is present
if len(lines) == 0:
lines.insert(0, "Date Count")
lastLine = lines[-1]
elif lines[0] != "Date Count":
lines.insert(0, "Date Count")
lastLine = lines[-1]
else:
lastLine = lines[-1]
    # check whether today's date is already in the file
    # if not, append it with a counter of 1
    # if it is, read the counter and increment its value
if lastLine.split()[0] != today:
lines.append(today + " 1")
f = open("log.txt", "w")
for line in lines:
if line != "":
print(line, file=f)
f.close()
else:
        # check that today's counter holds a number
        try:
            oldCount = int(lastLine.split()[1])
        except ValueError:
            oldCount = 0
            print("\n Today's counter was reset because of a non-numeric value!\n")
lines[-1] = today + " " + str(oldCount + cnt)
f = open("log.txt", "w")
for line in lines:
if line != "":
print(line, file=f)
f.close()
print('This program opens msra with the parameter /offerra kmr-????-???')
call_msra()
|
Aarghe/some_scripts
|
msra/msra.py
|
msra.py
|
py
| 2,782 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 27,
"usage_type": "attribute"
}
] |
5405379024
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import random # imports relevant libraries
import operator
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
num_of_agents = 10
num_of_iterations = 100
neighbourhood = 20
f = open('datain.txt') # opens csv file from directory
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC) # reads csv, converting all unquoted fields to floats
environment = [] #creates empty list to hold all data for environment
agents = [] #make list called agents
for row in reader: # A list of rows
rowlist = [] #creates empty list for rows
    for value in row: # A list of values
rowlist.append(value) #move row values to row list
environment.append(rowlist) # append row lists to environment list
#print(environment) # Floats
matplotlib.pyplot.imshow(environment) #use this library to display raster values from environment list
matplotlib.pyplot.show()
f.close() # closes reader
def distance_between(agent0, agent1): # new function to compute the Pythagorean distance between two looped agents
return (((agent0.x - agent1.x)**2) + ((agent0.y - agent1.y)**2))**0.5
# Make the agents.
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment,agents))
# Move the agents.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x, agents[i].y)
matplotlib.pyplot.show()
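# compute pairwise distances between all agents (the result is currently unused)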
for agent0 in agents:
for agent1 in agents:
distance = distance_between(agent0, agent1)
|
cman2000/Portfolioabm
|
model.py
|
model.py
|
py
| 1,832 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_NONNUMERIC",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.pyplot.imshow",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.show",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "agentframework.Agent",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot.ylim",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.xlim",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.imshow",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.scatter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.show",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
}
] |
14698252975
|
from flask import Flask, request, jsonify
from SSAPI import app, api, db, guard
from flask_restplus import Resource, reqparse, inputs
import flask_praetorian
from SSAPI.models import *
@api.route('/Scrimmages')
class ScrimmageList(Resource):
@flask_praetorian.auth_required
def get(self):
""" Returns a list of Scrimmages """
current_id = flask_praetorian.current_user().id
current_user_roles = flask_praetorian.current_user().roles
# Filtering/sorting
parser = reqparse.RequestParser()
parser.add_argument('role', type=str) # role (advisor or presenter)
parser.add_argument('all', type=inputs.boolean) # all admin only
parser.add_argument('scrimmage_complete', type=inputs.boolean) # Completed?
args = parser.parse_args()
query = None
if args["all"]:
if "admin" in current_user_roles:
query = Scrimmage.query
else:
query = Scrimmage.query.filter(
(Scrimmage.advisors.any(User.id == current_id)) |
(Scrimmage.presenters.any(User.id == current_id)))
else:
query = Scrimmage.query.filter(
(Scrimmage.advisors.any(User.id == current_id)) |
(Scrimmage.presenters.any(User.id == current_id)))
if args["role"]:
if "advisor" in args["role"]:
query = query.filter(
Scrimmage.advisors.any(User.id == current_id))
if "presenter" in args["role"]:
query = query.filter(Scrimmage.presenters.any(
User.id == current_id))
if args["scrimmage_complete"] is not None:
query = query.filter(
Scrimmage.scrimmage_complete == args["scrimmage_complete"])
ret = []
result = query.all()
for i in result:
ret.append(i.as_dict())
resp = jsonify(ret)
return resp
@flask_praetorian.auth_required
def post(self):
""" Create a new Scrimmage """
parser = reqparse.RequestParser()
parser.add_argument('subject', required=True, type=str)
parser.add_argument('schedule', required=True, type=str)
parser.add_argument('scrimmage_type', required=True, type=str)
parser.add_argument('presenters', required=True, type=list, location="json")
parser.add_argument('max_advisors', type=int)
args = parser.parse_args()
if not args["max_advisors"]:
args["max_advisors"] = 5
new_scrimmage = Scrimmage(subject=args["subject"],
schedule=args["schedule"],
scrimmage_complete=False,
scrimmage_type=args["scrimmage_type"],
max_advisors=args["max_advisors"])
for i in args["presenters"]:
scrimmage_user = User.query.filter_by(id=i).first()
if "presenter" in scrimmage_user.roles:
new_scrimmage.presenters.append(scrimmage_user)
else:
resp = jsonify({"message": "Unable to locate or invalid user for presenter"})
resp.status_code = 400
return resp
db.session.add(new_scrimmage)
db.session.commit()
resp = jsonify(new_scrimmage.as_dict())
resp.status_code = 200
return resp
@api.route('/Scrimmages/<int:id>')
class Scrimmages(Resource):
@flask_praetorian.auth_required
def get(self, id):
""" Returns info about a Scrimmage """
scrimmage = Scrimmage.query.filter_by(id=id).first()
return jsonify(scrimmage.as_dict())
@flask_praetorian.auth_required
def post(self, id):
""" Updates a scrimmage """
scrimmage = Scrimmage.query.filter_by(id=id).first()
parser = reqparse.RequestParser()
parser.add_argument('subject', type=str)
parser.add_argument('schedule', type=str)
parser.add_argument('scrimmage_type', type=str)
parser.add_argument('presenters', type=list, location="json")
parser.add_argument('advisors', type=list, location="json")
parser.add_argument('max_advisors', type=int)
parser.add_argument('scrimmage_complete', type=inputs.boolean)
args = parser.parse_args()
# If I am an admin, OR one of the presenters, I can modify
user_id = flask_praetorian.current_user().id
user = User.query.filter_by(id=user_id).first()
if (user in scrimmage.presenters or
'admin' in flask_praetorian.current_user().roles):
update_dict = {}
for param in args.keys():
if args[param]:
new_presenters = []
new_advisors = []
if "presenters" in param:
for i in args[param]:
new_presenter = User.query.filter_by(id=i).first()
if new_presenter and 'presenter' in new_presenter.roles:
new_presenters.append(new_presenter)
else:
resp = jsonify({"message": "Unable to locate or invalid user for presenter"})
resp.status_code = 400
return resp
scrimmage.presenters = new_presenters
elif "advisors" in param:
for i in args[param]:
new_advisor = User.query.filter_by(id=i).first()
if new_advisor and 'advisor' in new_advisor.roles:
new_advisors.append(new_advisor)
else:
resp = jsonify({"message": "Unable to locate or invalid user for advisor"})
resp.status_code = 400
return resp
scrimmage.advisors = new_advisors
else:
update_dict[param] = args[param]
if update_dict:
Scrimmage.query.filter_by(id=id).update(update_dict)
db.session.commit()
else:
resp = jsonify({"message": "Unauthorized to update"})
resp.status_code = 401
return resp
resp = jsonify(scrimmage.as_dict())
resp.status_code = 200
return resp
@flask_praetorian.auth_required
def delete(self, id):
""" Delete a Scrimmage """
# If I am an admin, OR one of the presenters, I can delete
user_id = flask_praetorian.current_user().id
user = User.query.filter_by(id=user_id).first()
scrimmage = Scrimmage.query.filter_by(id=id).first()
if (user in scrimmage.presenters or
'admin' in flask_praetorian.current_user().roles):
Scrimmage.query.filter_by(id=id).delete()
db.session.commit()
return 'Scrimmage Deleted', 204
return 'UNAUTHORIZED', 401
|
ktelep/SSAPI
|
SSAPI/scrimmage_views.py
|
scrimmage_views.py
|
py
| 7,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask_restplus.Resource",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_restplus.reqparse.RequestParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_restplus.reqparse",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask_restplus.inputs.boolean",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.inputs",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask_restplus.inputs.boolean",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.inputs",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.auth_required",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.reqparse.RequestParser",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask_restplus.reqparse",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session.add",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.db",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "SSAPI.db.session.commit",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.db",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.auth_required",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.api.route",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "SSAPI.api",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask_restplus.Resource",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.auth_required",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.reqparse.RequestParser",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "flask_restplus.reqparse",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "flask_restplus.inputs.boolean",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.inputs",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session.commit",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.db",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.auth_required",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "flask_praetorian.current_user",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session.commit",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "SSAPI.db.session",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.db",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "flask_praetorian.auth_required",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "SSAPI.api.route",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "SSAPI.api",
"line_number": 94,
"usage_type": "name"
}
] |
42132347145
|
import math
import numpy as np
from scipy.stats import bernoulli
simlen = 1000000
pmf = np.full(10,0.1)
def cdf(k):
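    # CDF of a discrete uniform variable on {1, ..., 10}: F(k) = k/10, clipped to [0, 1]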
if(k>10):
return 1
elif(k<=0):
return 0
else:
return k*0.1
print("Value equal to 7:")
p1 = pmf[7]
data_bern1 = bernoulli.rvs(size=simlen,p=p1)
err_ind1 = np.nonzero(data_bern1 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind1)/simlen,4),round(p1,2))
#print("Simulated values: ", data_bern1)
print("Value greater than 7:")
p2 = cdf(10)-cdf(7)
data_bern2 = bernoulli.rvs(size=simlen ,p=p2)
err_ind2 = np.nonzero(data_bern2 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind2)/simlen,4),round(p2,2))
#print("Simulated values: ", data_bern2)
print("Value less than 7:")
p3 = cdf(6)
data_bern3 = bernoulli.rvs(size=simlen ,p=p3)
err_ind3 = np.nonzero(data_bern3 == 1)
print("Probability-simulation,actual:",round(np.size(err_ind3)/simlen, 4),round(p3,2))
#print("Simulated values: ", data_bern3)
|
gadepall/digital-communication
|
exemplar/10/13/3/30/codes/code.py
|
code.py
|
py
| 984 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "numpy.full",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli.rvs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.nonzero",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli.rvs",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.nonzero",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli.rvs",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "numpy.nonzero",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 35,
"usage_type": "call"
}
] |
38740337725
|
import pytest
import numpy as np
from uncoverml import patch
@pytest.mark.parametrize('make_multi_patch',
['make_patch_31', 'make_patch_11'],
indirect=True)
def test_grid_patch(make_multi_patch):
timg, pwidth, tpatch, tx, ty = make_multi_patch
patches = patch.grid_patches(timg, pwidth)
assert np.allclose(patches, tpatch)
def test_point_patches(make_points):
timg, pwidth, points, tpatch = make_points
patches = np.array(list(patch.point_patches(timg, pwidth, points)))
assert np.allclose(patches, tpatch)
|
GeoscienceAustralia/uncover-ml
|
tests/test_patch.py
|
test_patch.py
|
py
| 593 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "uncoverml.patch.grid_patches",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "uncoverml.patch",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.allclose",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "uncoverml.patch.point_patches",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "uncoverml.patch",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "numpy.allclose",
"line_number": 25,
"usage_type": "call"
}
] |
43984207586
|
# gdpyt-analysis: test.test_fit_3dsphere
"""
Notes
"""
# imports
from os.path import join
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from correction import correct
from utils import fit, plotting, functions
# read dataframe
fp = '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/02.07.22_membrane_characterization/analysis/tests/compare-interior-particles-per-test/' \
'df_id11.xlsx'
df = pd.read_excel(fp)
microns_per_pixel = 1.6
correctX = df.x.to_numpy()
correctY = df.y.to_numpy()
correctZ = df.z_corr.to_numpy()
raw_data = np.stack([correctX, correctY, correctZ]).T
xc = 498 * microns_per_pixel
yc = 253 * microns_per_pixel
zc = 3
r_edge = 500 * microns_per_pixel
# fit a sphere to 3D points
def fit_sphere(spX, spY, spZ):
# Assemble the A matrix
spX = np.array(spX)
spY = np.array(spY)
spZ = np.array(spZ)
A = np.zeros((len(spX), 4))
A[:, 0] = spX * 2
A[:, 1] = spY * 2
A[:, 2] = spZ * 2
A[:, 3] = 1
# Assemble the f matrix
f = np.zeros((len(spX), 1))
f[:, 0] = (spX * spX) + (spY * spY) + (spZ * spZ)
    C, residuals, rank, singval = np.linalg.lstsq(A, f, rcond=None)
# solve for the radius
t = (C[0] * C[0]) + (C[1] * C[1]) + (C[2] * C[2]) + C[3]
radius = math.sqrt(t)
return radius, C[0], C[1], C[2]
# fit a sphere to 3D points
def fit_spherexy(spX, spY, spZ, xc, yc):
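    # sphere fit with the lateral center (xc, yc) held fixed: the unknowns are
    # z0 and (r^2 - xc^2 - yc^2 - z0^2), which keeps the least-squares problem linear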
# Assemble the A matrix
spX = np.array(spX)
spY = np.array(spY)
spZ = np.array(spZ)
A = np.zeros((len(spX), 2))
A[:, 0] = spZ * 2
A[:, 1] = 1
# Assemble the f matrix
f = np.zeros((len(spX), 1))
f[:, 0] = (spX * spX) + (spY * spY) + (spZ * spZ) - (2 * spX * xc) - (2 * spY * yc) # + xc ** 2 + yc ** 2
# least squares fit
    C, residuals, rank, singval = np.linalg.lstsq(A, f, rcond=None)
# solve for the radius
t = (xc**2) + (yc**2) + (C[0] * C[0]) + C[1]
radius = math.sqrt(t)
return radius, C[0]
def fit_ellipsoid_from_center(X, Y, Z, xc, yc, zc, r):
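    # fit only the vertical radius r_z of an ellipsoid centered at (xc, yc, zc)
    # with known lateral radius r; the least-squares unknown is r_z^2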
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
f = np.zeros((len(X), 1))
f[:, 0] = -1 * ((Z * Z) - (2 * zc * Z) + (zc * zc))
A = np.zeros((len(X), 1))
A[:, 0] = ((X * X) - (2 * xc * X) + (xc * xc) + (Y * Y) - (2 * yc * Y) + (yc * yc)) / (r * r) - 1
# least squares fit
    C, residuals, rank, singval = np.linalg.lstsq(A, f, rcond=None)
# solve for radius in z-dir.
r_z = math.sqrt(C[0])
return r_z
def calc_spherical_angle(r, xyz):
"""
Given a point (x, y, z) approx. on a sphere of radius (r), return the angle phi and theta of that point.
:param r:
:param xyz:
:return:
"""
x, y, z = xyz[0], xyz[1], xyz[2]
if np.abs(z) > r:
return np.nan, np.nan
else:
phi = np.arccos(z / r)
if x < 0 and y < 0:
theta_half = np.arccos(x / (r * np.sin(phi)))
theta_diff = np.pi - theta_half
theta = np.pi + theta_diff
else:
theta = np.arccos(x / (r * np.sin(phi)))
return phi, theta
# fit 3d ellipsoid
r_z = fit_ellipsoid_from_center(correctX, correctY, correctZ, xc, yc, zc, r_edge)
# general 3d sphere fit
rr, xx0, yy0, zz0 = fit_sphere(correctX, correctY, correctZ)
# custom 3d sphere fit
r, z0 = fit_spherexy(correctX, correctY, correctZ, xc, yc)
x0, y0 = xc, yc
phis = []
thetas = []
for i in range(raw_data.shape[0]):
    x, y, z = raw_data[i, 0], raw_data[i, 1], raw_data[i, 2]
dx = x - x0
dy = y - y0
dz = z - z0
if x < x0 * 0.5:
phi, theta = calc_spherical_angle(r, xyz=(dx, dy, dz))
if any([np.isnan(phi), np.isnan(theta)]):
continue
else:
# phis.append(phi)
thetas.append(theta)
if x < x0:
phi, theta = calc_spherical_angle(r, xyz=(dx, dy, dz))
if any([np.isnan(phi), np.isnan(theta)]):
continue
else:
phis.append(phi)
phis = np.array(phis)
thetas = np.array(thetas)
# ----------------------------------- PLOTTING ELLIPSOID
custom_ellipsoid = True
if custom_ellipsoid:
u = np.linspace(thetas.min(), thetas.max(), 20)
v = np.linspace(0, np.pi/2, 20)
u, v = np.meshgrid(u, v)
xe = r_edge * np.cos(u) * np.sin(v)
ye = r_edge * np.sin(u) * np.sin(v)
ze = r_z * np.cos(v)
xe = xe.flatten() + xc
ye = ye.flatten() + yc
ze = ze.flatten() + zc
# --- plot sphere
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(x, y, z, color="r")
#ax.plot_surface(xe, ye, ze, cmap='coolwarm', alpha=0.5)
ax.scatter(xe, ye, ze, zdir='z', s=20, c='r', rasterized=True)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=2, c='b', rasterized=True, alpha=0.25)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
raise ValueError('ah')  # intentional early stop: the sphere plots below are skipped
# ----------------------------------- PLOTTING SPHERES
gen_sphere, custom_sphere = True, True
# --- calculate points on sphere
if custom_sphere:
u, v = np.mgrid[thetas.min():thetas.max():20j, 0:phis.max():20j]
x=np.cos(u)*np.sin(v)*r
y=np.sin(u)*np.sin(v)*r
z=np.cos(v)*r
x = x + x0
y = y + y0
z = z + z0
# --- plot sphere
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(x, y, z, color="r")
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
# plot sphere viewed from above
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(90, 255)
plt.show()
if gen_sphere:
x2 = np.cos(u) * np.sin(v) * rr
y2 = np.sin(u) * np.sin(v) * rr
z2 = np.cos(v) * rr
x2 = x2 + xx0
y2 = y2 + yy0
z2 = z2 + zz0
# plot spheres
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, cmap='coolwarm', alpha=0.5)
ax.plot_surface(x2, y2, z2, cmap='cool', alpha=0.5)
# ax.scatter(correctX, correctY, correctZ, zdir='z', s=20, c='b', rasterized=True)
ax.set_xlabel(r'$x \: (\mu m)$')
ax.set_ylabel(r'$y \: (\mu m)$')
zlabel = ax.set_zlabel(r'$z \: (\mu m)$')
ax.view_init(15, 255)
plt.show()
j = 1
|
sean-mackenzie/gdpyt-analysis
|
test/test_fit_3dsphere.py
|
test_fit_3dsphere.py
|
py
| 6,764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_excel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.lstsq",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.lstsq",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.lstsq",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "numpy.arccos",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "numpy.arccos",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "numpy.meshgrid",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "numpy.mgrid",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 253,
"usage_type": "name"
}
] |
15306305960
|
""" WENO Lax-Friedrichs
Author: Pierre-Yves Taunay
Date: November 2018
"""
import numpy as np
import matplotlib.pyplot as plt
###############
#### SETUP ####
###############
# Grid
npt = 200
L = 2
dz = L/npt
zvec = np.linspace(-L/2 + dz/2,L/2-dz/2,npt)
EPS = 1e-16
# Time
dt = dz / 1 * 0.4
tmax = 2000
tc = 0
# Scheme
# Flux can be 'LF', 'LW', or 'FORCE' ('FLIC' is listed but not implemented in compute_flux below)
order = 5
flux_type = 'FORCE'
# Data holders
#uvec = np.ones(len(zvec))
uvec = np.zeros(len(zvec))
def f_0(u):
# -0.8 < x < -0.6
b = (zvec>=-0.8) & (zvec<=-0.6)
z = zvec[b]
u[b] = np.exp(-np.log(2)*(z+0.7)**2/9e-4)
# -0.4 < x < -0.2
b = (zvec>=-0.4) & (zvec<=-0.2)
u[b] = 1
# 0 < x < 0.2
b = (zvec>=0) & (zvec<=0.2)
z = zvec[b]
u[b] = 1 - np.abs(10*z-1)
# 0.4 < x < 0.6
b = (zvec>=0.4) & (zvec<=0.6)
z = zvec[b]
u[b] = np.sqrt(1- 100*(z-0.5)**2)
# b = (zvec>=-0.5) & (zvec<=0.5)
# u[b] = 0
f_0(uvec)
u0 = uvec
#######################
#### TIME MARCHING ####
#######################
idx = 0
### WENO 3
# vi+1/2[0]^L : 1/2 v_i + 1/2 v_{i+1}
# vi+1/2[1]^L : -1/2 v_{i-1} + 3/2 v_i
# vi-1/2[0]^R : 3/2 v_i - 1/2 v_{i+1}
# vi-1/2[1]^R : 1/2 v_{i-1} + 1/2 v_i
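# For order 5, the three candidate stencils, optimal weights d0..d2 and
# smoothness indicators beta0..beta2 follow the classic WENO-JS scheme
# (Jiang & Shu, 1996); see compute_weights and compute_lr below.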
def compute_weights(up1,u,um1):
if order == 3:
d0 = 2/3
d1 = 1/3
beta0 = (up1-u)**2
beta1 = (u-um1)**2
alpha0 = d0 / (EPS+beta0)**2
alpha1 = d1 / (EPS+beta1)**2
alphat0 = d1 / (EPS+beta0)**2
alphat1 = d0 / (EPS+beta1)**2
alphasum = alpha0+alpha1
alphatsum = alphat0 + alphat1
w0 = alpha0 / alphasum
w1 = alpha1 / alphasum
wt0 = alphat0 / alphatsum
wt1 = alphat1 / alphatsum
return w0,w1,wt0,wt1
elif order == 5:
up2 = np.roll(u,-2)
um2 = np.roll(u,2)
d0 = 3/10
d1 = 3/5
d2 = 1/10
beta0 = 13/12*(u-2*up1+up2)**2 + 1/4*(3*u-4*up1+up2)**2
beta1 = 13/12*(um1-2*u+up1)**2 + 1/4*(um1-up1)**2
beta2 = 13/12*(um2-2*um1+u)**2 + 1/4*(um2-4*um1+3*u)**2
alpha0 = d0/(EPS+beta0)**2
alpha1 = d1/(EPS+beta1)**2
alpha2 = d2/(EPS+beta2)**2
alphat0 = d2/(EPS+beta0)**2
alphat1 = d1/(EPS+beta1)**2
alphat2 = d0/(EPS+beta2)**2
alphasum = alpha0 + alpha1 + alpha2
alphatsum = alphat0 + alphat1 + alphat2
w0 = alpha0/alphasum
w1 = alpha1/alphasum
w2 = alpha2/alphasum
wt0 = alphat0/alphatsum
wt1 = alphat1/alphatsum
wt2 = alphat2/alphatsum
return w0,w1,w2,wt0,wt1,wt2
def compute_lr(up1,u,um1):
if order == 3:
u0p = 1/2*u + 1/2*up1
u1p = -1/2*um1 + 3/2*u
u0m = 3/2*u - 1/2*up1
u1m = 1/2*um1 + 1/2*u
w0,w1,wt0,wt1 = compute_weights(up1,u,um1)
uL = w0*u0p + w1*u1p
uR = wt0*u0m + wt1*u1m
elif order == 5:
up2 = np.roll(up1,-1)
um2 = np.roll(um1,1)
u0m = 11/6*u - 7/6*up1 + 1/3*up2
u1m = 1/3*um1 + 5/6*u - 1/6*up1
u2m = -1/6*um2 + 5/6*um1 + 1/3*u
u0p = 1/3*u + 5/6*up1 - 1/6*up2
u1p = -1/6*um1 + 5/6*u + 1/3*up1
u2p = 1/3*um2 -7/6*um1 + 11/6*u
w0,w1,w2,wt0,wt1,wt2 = compute_weights(up1,u,um1)
uL = w0*u0p + w1*u1p + w2*u2p
uR = wt0*u0m + wt1*u1m + wt2*u2m
return uL,uR
def flux(u):
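    # linear advection flux f(u) = u (unit advection speed)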
return u
def compute_flux(u):
# u_{i+1}, u_{i-1}
up1 = np.roll(u,-1)
um1 = np.roll(u,1)
# Reconstruct the data on the stencil
uL, uR = compute_lr(up1,u,um1)
# Compute the RHS flux
up1h = np.roll(uR,-1) # This will contain u_{i+1/2}^R
um1h = np.roll(uL,1) # This will contain u_{i-1/2}^L
fpR = 0
fpL = 0
if flux_type == 'LF':
fpR = compute_flux_lf(uL,up1h)
fpL = compute_flux_lf(um1h,uR)
elif flux_type == 'LW':
fpR = compute_flux_lw(uL,up1h)
fpL = compute_flux_lw(um1h,uR)
elif flux_type == 'FORCE':
fpR = compute_flux_force(uL,up1h)
fpL = compute_flux_force(um1h,uR)
return -1/dz * (fpR-fpL)
def compute_flux_lf(uL,uR):
### Left, right
fL = flux(uL)
fR = flux(uR)
alpha = 1 # Derivative of flux
return 1/2*(fL+fR-alpha*(uR-uL))
def compute_flux_lw(uL,uR):
alpha = 1
u_lw = 1/2 * (uL+uR) - 1/2*alpha*(flux(uR)-flux(uL))
return flux(u_lw)
def compute_flux_force(uL,uR):
f_lf = compute_flux_lf(uL,uR)
f_lw = compute_flux_lw(uL,uR)
return 1/2*(f_lf + f_lw)
while tc<tmax:
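    # third-order TVD (Shu-Osher) Runge-Kutta time stepping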
u = uvec
u1 = u + dt * compute_flux(u)
u2 = 3/4*u + 1/4*u1 + 1/4* dt * compute_flux(u1)
unp1 = 1/3*u + 2/3*u2 + 2/3 * dt * compute_flux(u2)
uvec = unp1
tc = tc+dt
plt.plot(zvec,u0,'-')
plt.plot(zvec,uvec,'o')
print("L1:",np.sum(np.abs(u0-uvec)/len(u0)))
|
pytaunay/weno-tests
|
python/advection_1d/weno-advection.py
|
weno-advection.py
|
py
| 5,051 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 226,
"usage_type": "call"
}
] |
75163149306
|
# -*- coding: utf-8 -*-
"""
Flask Skeleton
"""
from flask import Blueprint, request, redirect, url_for, render_template, flash, session
from pymongo import errors as mongo_errors
from bson.objectid import ObjectId
from flask_login import login_required
import datetime
from app import mongo, login_manager
from app.usuario.model import Usuario
@login_manager.user_loader
def load_user(usuario_id):
return Usuario.get_by_id(usuario_id)
post = Blueprint('post', __name__)
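# Blueprint holding the routes for creating and viewing blog posts (stored as subdocuments of a blog in MongoDB)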
@post.route('/blogs/<blog_id>/posts/novo', methods=['GET'])
@login_required
def get_novo(blog_id):
data_cadastro = datetime.datetime.utcnow()
return render_template('blog/form-post.html', data_cadastro=data_cadastro, blog_id=blog_id)
@post.route('/blogs/<blog_id>/posts/novo', methods=['POST'])
@login_required
def post_novo(blog_id):
data_cadastro = datetime.datetime.utcnow()
try:
post = mongo.db.blog.update_one(
{"_id": ObjectId(blog_id)},
{"$push": {
"posts": {
"_id": ObjectId(),
"titulo": request.form['titulo'],
"data_cadastro": data_cadastro,
"secoes": [{
"titulo": request.form['titulo'],
"data_cadastro": data_cadastro,
"conteudo": request.form['conteudo'],
"secoes": []
}]
}
}})
except mongo_errors.OperationFailure as e:
return render_template('db_error.html', error=e)
return redirect(url_for('blog.get_blog', blog_id=blog_id))
# (?) @post.route('/posts/<post_id>', methods=['GET'])
@post.route('/blogs/<blog_id>/posts/<post_id>', methods=['GET'])
def get_post(blog_id, post_id):
"""Detalha um post específico
"""
try:
blog = mongo.db.blog.find_one(
{
'_id': ObjectId(blog_id),
'posts': {'$elemMatch': {'_id': ObjectId(post_id)}}
},
{'titulo': 1, 'posts.$': 1}
)
except mongo_errors.OperationFailure as e:
return render_template('db_error.html', error=e)
# print(blog)
return render_template('blog/post-detalhe.html', blog=blog, blog_id=blog_id)
|
e-ruiz/big-data
|
01-NoSQL/atividade-04/src/app/blog/posts.py
|
posts.py
|
py
| 2,268 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "app.usuario.model.Usuario.get_by_id",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.usuario.model.Usuario",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "app.login_manager.user_loader",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "app.login_manager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_login.login_required",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "app.mongo.db.blog.update_one",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "app.mongo.db",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "app.mongo",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pymongo.errors.OperationFailure",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pymongo.errors",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask_login.login_required",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "app.mongo.db.blog.find_one",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "app.mongo.db",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "app.mongo",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pymongo.errors.OperationFailure",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "pymongo.errors",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 76,
"usage_type": "call"
}
] |
2026773879
|
import json
import os
import pathlib
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Chrome()
targetUrl = 'https://www.douban.com/'
username = ""
psw = ""
def login_zhi_hu():
    loginurl = targetUrl  # login page
    # load the webdriver, used to read element attributes on the login page
    # driver = webdriver.Chrome()
    driver.get(loginurl)  # request the login page
    # time.sleep(50)
    # driver.implicitly_wait(10)
    driver.switch_to.frame(driver.find_elements_by_tag_name('iframe')[0])
    bottom = driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]')  # switch to the account/password login tab
    # bottom = driver.find_element_by_class_name('account-tab-account on')
    bottom.click()
    driver.find_element_by_name('username').send_keys(username)  # enter the username
    driver.find_element_by_name('password').clear()  # get the password box and clear it
    driver.find_element_by_name('password').send_keys(psw)  # enter the password
    # #
    time.sleep(5)
    bottom = driver.find_element_by_class_name('account-form-field-submit ')
    bottom.click()
    time.sleep(4)
    auth_frame = driver.find_element_by_id('tcaptcha_iframe')
    driver.switch_to.frame(auth_frame)
    element = driver.find_element_by_xpath('//*[@id="tcaptcha_drag_thumb"]')
    ActionChains(driver).click_and_hold(on_element=element).perform()
    ActionChains(driver).move_to_element_with_offset(to_element=element, xoffset=180, yoffset=0).perform()
    tracks = get_tracks(25)  # an arbitrary value for the slider captcha, so failures are likely; published approaches capture the gap image and analyze its coordinates for a much higher success rate; smarter recognition would be the best option
    for track in tracks:
        # start moving: move_by_offset()
        ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform()
    # 7. release the mouse after a short delay: release()
    time.sleep(0.5)
    ActionChains(driver).release().perform()
def get_tracks(distance):
    """
    Build the movement track, imitating a human slide: uniform acceleration first, then uniform deceleration.
    Basic equations of uniformly accelerated motion:
    (1) v = v0 + a*t
    (2) s = v0*t + 1/2*a*t^2
    """
    # initial velocity
    v = 0
    # use roughly 0.3 s as the unit time step; each track entry is the displacement within one step
    t = 0.31
    # position/track list; each element is the displacement within one time step
    tracks = []
    # current displacement
    current = 0
    # start decelerating once mid is reached
    mid = distance * 4 / 5
    while current < distance:
        if current < mid:
            # a smaller acceleration means a smaller displacement per step, so the simulated track is finer and more detailed
            a = 2.3
        else:
            a = -3
        # initial velocity for this step
        v0 = v
        # displacement within the step
        s = v0 * t + 0.5 * a * (t ** 2)
        # current position
        current += s
        # append to the track list
        tracks.append(round(s))
        # the velocity has reached v; it serves as the next step's initial velocity
        v = v0 + a * t
    return tracks
def login_with_cookies():
driver.get(targetUrl)
with open("cookies.txt", "r") as fp:
cookies = json.load(fp)
for cookie in cookies:
driver.add_cookie(cookie)
driver.get(targetUrl)
update_cookies()
def update_cookies():
f = open("cookies.txt", 'w')
f.truncate()
cookies = driver.get_cookies()
with open("cookies.txt", "w") as fp:
json.dump(cookies, fp)
def is_file_exit():
    path = pathlib.Path('cookies.txt')
    # check existence first: os.path.getsize raises if the file is missing
    if not path.is_file() or not os.path.getsize(path):
        return False
    return True
if __name__ == '__main__':
if is_file_exit():
login_with_cookies()
else:
login_zhi_hu()
time.sleep(4)
cookies = driver.get_cookies()
with open("cookies.txt", "w") as fp:
json.dump(cookies, fp)
|
Nienter/mypy
|
personal/douban.py
|
douban.py
|
py
| 3,789 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 118,
"usage_type": "call"
}
] |
19400090459
|
from typing import List
class Solution:
def minFallingPathSum(self, A: List[List[int]]) -> int:
h = len(A)
w = len(A[0])
# bottom-up DP: A[i][j] becomes the cheapest falling path ending at (i, j),
# reachable from (i-1, j-1), (i-1, j) or (i-1, j+1) in the row above
for i in range(1,h):
for j in range(w):
if j == 0:
A[i][j] = min(A[i-1][j] + A[i][j],A[i-1][j+1] + A[i][j])
elif j == w - 1:
A[i][j] = min(A[i-1][j-1] + A[i][j],A[i-1][j] + A[i][j])
else:
A[i][j] = min(A[i-1][j-1] + A[i][j],A[i-1][j] + A[i][j],A[i-1][j+1] + A[i][j])
return min(A[-1])
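# Note: the DP runs in O(h*w) time and O(1) extra space since A is updated
# in place; each cell ends up holding the cheapest falling path into it.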
A = [[51,24],[-50,82]]
r = Solution().minFallingPathSum(A)
print(r)
|
Yigang0622/LeetCode
|
minFallingPathSum.py
|
minFallingPathSum.py
|
py
| 653 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
2502699508
|
# To manage matrices correctly
# At deployment, check if new matrices have been added to old batch sizes
import grid
import orjson
import sys
# VERSION_FILE
VERSION_FILE = "versioning.json"
def readable_string(batch, num_infected, infection_rate):
m,n = grid.parse_batch(batch)
return f'{n} Samples (with {m} tests. Upto {num_infected} positives)'
def update_cache(mlabels, matrices, codenames, jfile):
old_data = {}
f = {}
try:
with open(jfile, 'rb') as reader:
old_data = orjson.loads(reader.read())
except Exception as e:
print(f'Error : {e}')
for batch in mlabels:
print(batch)
m,n,i = mlabels[batch]
mat = matrices[m]
g, c = grid.generate_grid_and_cell_data(batch, mat)
f[batch] = {m : {"num_infected" : n, "infection_rate" : i, "readable" : readable_string(batch, n, i), "gridData" : g, "cellData" : c, "matrix" : m, "codename" : codenames[m]}}
ob = set(old_data)
nb = set(f)
for batch in old_data:
od = old_data[batch]
# Batch does not exist in new data
if batch not in f or not f[batch]:
print(f"Batch {batch} not in new matrix data, marking as inactive")
od["metadata"]["active"] = False
continue
nd = f[batch]
oa = od["metadata"]["active"]
oam = od["metadata"]["matrices"][-1]
if oam in nd:
# Currently active matrix in old data is the same as in the new data
if not oa:
od["metadata"]["active"] = True
od[oam] = nd[oam]
continue
# Whether or not the old batch was active, the matrix in the new data
# becomes the active one: bump the version, record it, and copy its data
for m in nd:
od["metadata"]["latest_version"] += 1
od["metadata"]["matrices"].append(m)
od["metadata"]["active"] = True
od[m] = nd[m]
# New batches can be safely added to old_data
for batch in nb - ob:
print(f"New batch added - {batch}")
od = {"metadata" : {}}
od["metadata"]["active"] = True
od["metadata"]["latest_version"] = 0
nd = f[batch]
for m in nd:
od["metadata"]["matrices"] = [m]
od[m] = nd[m]
old_data[batch] = od
jstr = orjson.dumps(old_data)
with open(jfile, "wb") as outfile:
outfile.write(jstr)
def load_cache():
data = {}
try:
with open(VERSION_FILE, 'rb') as reader:
data = orjson.loads(reader.read())
except Exception as e:
raise
active_batches = {}
all_batches = {}
for batch in data:
meta = data[batch]["metadata"]
mats = meta["matrices"]
is_active = meta["active"]
mat_names = set(data[batch]) - {"metadata"}
curr_version = len(mats) - 1
for i, m in enumerate(mats):
all_batches[f'{batch}_v{i}'] = data[batch][m]
if i == curr_version and is_active:
active_batches[f'{batch}_v{i}'] = data[batch][m]
# Active batches to be sorted by number of samples
sorted_bnames = sorted((grid.parse_batch(b)[1], b) for b in active_batches)
sorted_active_batches = {b : active_batches[b] for n, b in sorted_bnames}
bbs = {b : grid.batch_size_from_batch_name(b) for b in all_batches}
batch_size_to_batch = {}
for bn, bs in bbs.items():
batch_size_to_batch[bs] = batch_size_to_batch.get(bs, [])
batch_size_to_batch[bs].append({bn : all_batches[bn]["codename"]})
return sorted_active_batches, all_batches, batch_size_to_batch
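# Illustrative usage (names as returned above):
#   active, all_batches, by_size = load_cache()
#   for name, info in active.items():
#       print(name, info["codename"])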
if __name__ == '__main__':
from compute_wrapper import get_matrix_sizes_and_labels, get_matrix_labels_and_matrices, get_matrix_codenames
update_cache(get_matrix_sizes_and_labels(), get_matrix_labels_and_matrices(), get_matrix_codenames(), VERSION_FILE)
|
Aakriti28/tapestry-server
|
old-server/matrix_manager.py
|
matrix_manager.py
|
py
| 4,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "grid.parse_batch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "orjson.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "grid.generate_grid_and_cell_data",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "orjson.dumps",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "orjson.loads",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "grid.parse_batch",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "grid.batch_size_from_batch_name",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "compute_wrapper.get_matrix_sizes_and_labels",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "compute_wrapper.get_matrix_labels_and_matrices",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "compute_wrapper.get_matrix_codenames",
"line_number": 109,
"usage_type": "call"
}
] |
8063903284
|
import logging
import subprocess
from subprocess import Popen, PIPE
def run(command: str) -> None:
"""
:param command: shell statement
:return:
"""
logging.debug(command)
subprocess.call(command, shell=True, universal_newlines=True)
def call(command: str) -> str:
"""
:param command: shell statement
:return the result of execute the shell statement
"""
logging.debug(command)
with Popen(command, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True) as fd:
out, err = fd.communicate()
if fd.returncode:
raise Exception(err.strip())
logging.debug(out.strip())
return out.strip()
def ssh_call(address: str, work_dir: str, command: str) -> str:
"""
:param address: the remote server ip
:param work_dir: the remote server dir
:param command: the shell statement
:return the result of execute the shell statement
"""
return call(
"""
ssh -q {address} 'cd {work_dir} && {command}'
"""
.format(address=address, work_dir=work_dir, command=command)
)
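# Illustrative usage (host and paths are placeholders, not real endpoints):
#   run("mkdir -p /tmp/demo")
#   print(call("echo hello"))  # -> "hello"; a non-zero exit status raises
#   ssh_call("192.168.0.10", "/opt/app", "ls -l")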
|
leaderli/li_py
|
li/li_bash.py
|
li_bash.py
|
py
| 1,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.debug",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 25,
"usage_type": "call"
}
] |
24769179889
|
squaredWeight = None
def performCollection(cityLevel, filename):
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
import time
from pytrends.request import TrendReq
pytrends = TrendReq()
count = 0
for keyword in kw_list:
count += 1
if not '/' in keyword:
filename = outputDir+ keyword + '.pickle'
from os import path
if not path.exists(filename):
pytrends.build_payload([keyword])
if cityLevel:
df = pytrends.interest_by_region(resolution='CITY', inc_low_vol=True, inc_geo_code=False)
else:
df = pytrends.interest_by_region(resolution='COUNTRY', inc_low_vol=True, inc_geo_code=False)
import pickle
outfile = open(filename,'wb')
pickle.dump(df,outfile)
outfile.close()
#time.sleep(3)
print(count)
def formCityList(filename):
filenameToWriteTo = "allCities.pickle"
from os import path
if not path.exists(filenameToWriteTo):
outputDir = 'GoogleTrendsCity/'
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
count = 0
allCities = {}
for keyword in kw_list:
print(count)
if count != 897:
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
if len(df) != 0:
cities = list(df['geoName'])
latLong = list(df['coordinates'])
for i in range(0, len(cities), 1):
cityName = cities[i]
if not cityName.lower() in allCities:
allCities[cityName.lower()] = latLong[i]
count += 1
import pickle
outfile = open(filenameToWriteTo,'wb')
pickle.dump(allCities, outfile)
outfile.close()
def averageAndStdDevAcrossAssociationsMadeByGoogle(cityLevel, filename):
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import os
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
print(len(kw_list))
count = 0
valuesReturned = []
zeroValueCount = 0
for keyword in kw_list:
if keyword != 'con':
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
try:
valuesReturned.append(len(df))
except:
zeroValueCount += 1
count += 1
import numpy as np
print(np.average(valuesReturned))
print(np.std(valuesReturned))
print(zeroValueCount)
print(count)
def assignRegion(cityLevel, filename, outputFile):
import os
outputDirAssignRegion = 'AssignRegion/'
if not os.path.exists(outputDirAssignRegion):
os.mkdir(outputDirAssignRegion)
outputDirAssignRegion = 'AssignRegionWeightSquared/'
if not os.path.exists(outputDirAssignRegion):
os.mkdir(outputDirAssignRegion)
outputDirAssignRegion = 'AssignRegion/'
if squaredWeight:
outputDirAssignRegion = 'AssignRegionWeightSquared/'
isoToLat, isoToLong = getCountryInfo()
print(isoToLat)
print(isoToLong)
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
noData = 0
noWeightsOver0 = 0
rows = [['keyword', 'using top 1', 'using top 3', 'using weight > 50', 'all']]
keywordToRegion1 = {}
keywordToRegion2 = {}
keywordToRegion3 = {}
keywordToRegion4 = {}
for keyword in kw_list:
if keyword != 'con':
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
dataReturnedByTrends = False
try:
weights = list(df['value'])
weightsValues = []
for value in weights:
weightsValues.append(value[0])
df['weights'] = weightsValues
df = df.loc[df['weights'] > 0]
dataReturnedByTrends = True
except:
noData += 1
if dataReturnedByTrends:
if len(df) > 0:
df1 = df.nlargest(1, 'weights')
df2 = df.nlargest(3, 'weights')
df3 = df.loc[df['weights'] > 50]
df4 = df
label1 = predictRegion(cityLevel, df1, isoToLong)
if label1 != None:
keywordToRegion1[keyword] = label1
label2 = predictRegion(cityLevel, df2, isoToLong)
if label2 != None:
keywordToRegion2[keyword] = label2
label3 = predictRegion(cityLevel, df3, isoToLong)
if label3 != None:
keywordToRegion3[keyword] = label3
label4 = predictRegion(cityLevel, df4, isoToLong)
if label4 != None:
keywordToRegion4[keyword] = label4
if label1 != None or label2 != None or label3 != None or label4 != None:
rows.append([keyword, label1, label2, label3, label4])
else:
noWeightsOver0 += 1
print(str(noData) + " out of " + str(len(kw_list)) + " tokens had no data.")
print(str(noWeightsOver0) + " out of " + str(len(kw_list)) + " tokens had no weights.")
writeRowsToCSV(rows, outputDirAssignRegion+outputFile)
rows = [['Resriction', 'Predictions', 'NA_SA', 'AF_EUR', 'AS_OC', 'Total Accuracy', 'Total Predictions']]
rows.append(['using top 1']+evaluatePredictions(keywordToRegion1))
rows.append(['using top 3']+evaluatePredictions(keywordToRegion2))
rows.append(['using weight > 50']+evaluatePredictions(keywordToRegion3))
rows.append(['all']+evaluatePredictions(keywordToRegion4))
writeRowsToCSV(rows, outputDirAssignRegion+"Performance"+outputFile)
def predictRegion(cityLevel, df, isoToLong):
import numpy as np
if cityLevel:
geoNameToCoordinates = dict(zip(list(df["geoName"]), list(df['coordinates'])))
geoNameToWeight = dict(zip(list(df["geoName"]), list(df['weights'])))
label = None
l1 = 0
l2 = 0
l3 = 0
for geoName in geoNameToCoordinates:
coordinate = geoNameToCoordinates[geoName]
weight = geoNameToWeight[geoName]
if squaredWeight:
weight = weight*weight
long = coordinate['lng']
if long <= -25:
l1 += weight
elif long <= 65:
l2 += weight
else:
l3 += weight
Americas = l1
Africa_Europe = l2
Asia_Australia = l3
total = l1+l2+l3
if total > 0:
ratioAmericas = float(Americas)/float(total)
ratioAfrica_Europe = float(Africa_Europe)/float(total)
ratioAsia_Australia = float(Asia_Australia)/float(total)
ratioMax = np.max([ratioAmericas, ratioAfrica_Europe, ratioAsia_Australia])
label = None
if ratioAmericas == ratioMax:
label = "Americas"
elif ratioAfrica_Europe == ratioMax:
label = "Africa_Europe"
else:
label = "Asia_Australia"
else:
label = None
else:
countryISOCodeToWeight = dict(zip(list(df["geoCode"]), list(df['weights'])))
label = None
l1 = 0
l2 = 0
l3 = 0
for countryISOCode in countryISOCodeToWeight:
weight = countryISOCodeToWeight[countryISOCode]
long = isoToLong[countryISOCode]
if long <= -25:
l1 += weight
elif long <= 65:
l2 += weight
else:
l3 += weight
Americas = l1
Africa_Europe = l2
Asia_Australia = l3
total = l1+l2+l3
if total > 0:
ratioAmericas = float(Americas)/float(total)
ratioAfrica_Europe = float(Africa_Europe)/float(total)
ratioAsia_Australia = float(Asia_Australia)/float(total)
ratioMax = np.max([ratioAmericas, ratioAfrica_Europe, ratioAsia_Australia])
label = None
if ratioAmericas == ratioMax:
label = "Americas"
elif ratioAfrica_Europe == ratioMax:
label = "Africa_Europe"
else:
label = "Asia_Australia"
else:
label = None
return label
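# Longitude buckets used above (illustrative examples, not from the data):
#   lng <= -25       -> "Americas"        e.g. Chicago (-87.6)
#   -25 < lng <= 65  -> "Africa_Europe"   e.g. Moscow (37.6)
#   lng > 65         -> "Asia_Australia"  e.g. Tokyo (139.7)
# The label is the bucket with the largest (optionally squared) weight share.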
def getCountryInfo():
#file with average lat, long for each country
#country info from: https://gist.github.com/tadast/8827699#file-countries_codes_and_coordinates-csv
import pandas as pd
filePath = 'countries_codes_and_coordinates.csv'
df=pd.read_csv(filePath, encoding='utf-8')
print(df.columns)
temp = list(df["Alpha-2 code"])
countryList = []
for isoCode in temp:
countryList.append(str(isoCode).strip().replace('"', ''))
latitudeList = []
temp = list(df['Latitude (average)'])
for s in temp:
latitudeList.append(float(s.strip().replace('"', '')))
longitudeList = []
temp = list(df['Longitude (average)'])
for s in temp:
longitudeList.append(float(s.strip().replace('"', '')))
isoToLat = dict(zip(countryList, latitudeList))
isoToLong = dict(zip(countryList, longitudeList))
isoToLat['CW'] = 12.1696
isoToLong['CW'] = -68.9900
isoToLat['XK'] = 42.6026
isoToLong['XK'] = 20.9030
isoToLat['SX'] = 18.0425
isoToLong['SX'] = -63.0548
isoToLat['MF'] = 18.0826
isoToLong['MF'] = -63.0523
isoToLat['AX'] = 60.1785
isoToLong['AX'] = 19.9156
isoToLat['BL'] = 17.9000
isoToLong['BL'] = -62.8333
isoToLat['BQ'] = 12.1684
isoToLong['BQ'] = -68.3082
return isoToLat, isoToLong
def writeRowsToCSV(rows, fileToWriteToCSV):
import csv
if len(rows) > 0:
with open(fileToWriteToCSV, "w", encoding='utf-8') as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(rows)
fp.close()
print("Written " + str(len(rows)) + " rows to: " + fileToWriteToCSV)
def evaluatePredictions(tokenToPrediction):
import pandas as pd
filePath = "Input/combineDBsCoordinateGroundTruthDiv3.csv"
df=pd.read_csv(filePath, encoding='utf-8')
tokenToLabel = dict(zip(list(df["id"]), list(df['label'])))
l1 = 0
l2 = 0
l3 = 0
for token in tokenToPrediction:
prediction = tokenToPrediction[token]
if prediction == 'Americas':
l1 += 1
elif prediction == 'Africa_Europe':
l2 += 1
else:
l3 += 1
print(str(l1) + ", " + str(l2) + ", " + str(l3) + " Americas vs. Africa_Europe vs. Asia_Australia")
correct = {'Americas':0,'Africa_Europe':0,'Asia_Australia':0}
wrong = {'Americas':0,'Africa_Europe':0,'Asia_Australia':0}
for token in tokenToPrediction:
label = tokenToLabel[token]
prediction = tokenToPrediction[token]
if label == prediction:
if label == 'Americas':
correct['Americas'] += 1
elif label == 'Africa_Europe':
correct['Africa_Europe'] += 1
elif label == 'Asia_Australia':
correct['Asia_Australia'] += 1
else:
print("unknown label")
import sys
sys.exit()
else:
if label == 'Americas':
wrong['Americas'] += 1
elif label == 'Africa_Europe':
wrong['Africa_Europe'] += 1
elif label == 'Asia_Australia':
wrong['Asia_Australia'] += 1
else:
print("unknown label")
import sys
sys.exit()
import numpy as np
accuracy = float(np.sum(list(correct.values())))/float(np.sum(list(correct.values()))+np.sum(list(wrong.values())))
row = []
predictions = []
for key in ['Americas', 'Africa_Europe', 'Asia_Australia']:
predictions.append(float(correct[key]+wrong[key]))
precision = []
for key in ['Americas', 'Africa_Europe', 'Asia_Australia']:
precision.append(round(float(correct[key])/float(correct[key]+wrong[key])*100,2))
row = [str(predictions)]+precision
row += [round(accuracy*100, 2), float(np.sum(list(correct.values()))+np.sum(list(wrong.values())))]
return row
def compareQueryCityLocationVsTopTrendingCityLocation():
rows = [['query city', 'query city geo', 'top Google Trends city', 'top city geo', 'distance between two']]
distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends = []
noWeightsOver0 = 0
noData = 0
filename = "allCities.pickle"
import pickle
infile = open(filename,'rb')
cityToLatLong = pickle.load(infile)
infile.close()
count = 0
for cityName in cityToLatLong:
if not '/' in cityName:
queryCityCoordinates = (cityToLatLong[cityName]['lat'], cityToLatLong[cityName]['lng'])
queryCityName = cityName
outputDir = 'GoogleTrendsCity/'
filename = outputDir+ cityName + '.pickle'
from os import path
if path.exists(filename):
count += 1
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
try:
weights = list(df['value'])
weightsValues = []
for value in weights:
weightsValues.append(value[0])
df['weights'] = weightsValues
df = df.loc[df['weights'] > 0]
if len(df) > 0:
df1 = df.nlargest(1, 'weights')
topGoogleTrendCityCoordinates = list(df1['coordinates'])[0]
topGoogleTrendCityName = list(df1['geoName'])[0]
topGoogleTrendCityCoordinates = (topGoogleTrendCityCoordinates['lat'], topGoogleTrendCityCoordinates['lng'])
from geopy.distance import geodesic
from geopy.distance import great_circle
distanceBetweenTheTwo = geodesic(queryCityCoordinates, topGoogleTrendCityCoordinates).miles
distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends.append(distanceBetweenTheTwo)
rows.append([queryCityName, str(queryCityCoordinates), topGoogleTrendCityName, str(topGoogleTrendCityCoordinates), distanceBetweenTheTwo])
else:
noWeightsOver0 += 1
except:
noData += 1
print(str(noData) + " out of " + str(count) + " tokens had no data.")
print(str(noWeightsOver0) + " out of " + str(count) + " tokens had no weights.")
import numpy as np
print(np.average(distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends))
print(np.std(distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends))
writeRowsToCSV(rows, 'topCityAnalysis.csv')
if __name__ == '__main__':
pass
step1 = False
if step1:
performCollection(True, 'Input/459.pickle') #Google Trends at city level
performCollection(True, 'Input/3183.pickle') #Google Trends at city level
performCollection(False, 'Input/459.pickle') #Google Trends at country level
performCollection(False, 'Input/3183.pickle') #Google Trends at country level
'''Google Trends does not always return the same number of cities
the following code examines average/standard deviation for the number of cities returned'''
if False:
averageAndStdDevAcrossAssociationsMadeByGoogle(True, 'Input/459.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(True, 'Input/3183.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(False, 'Input/459.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(False, 'Input/3183.pickle')
step2 = True
if step2:
squaredWeight = True #When set, the weight returned by Google is squared (weight = weight*weight) before aggregation
filename = 'Input/459.pickle'
outputFilename = '459.csv'
assignRegion(True, filename, str(True)+outputFilename)
assignRegion(False, filename, str(False)+outputFilename)
filename = 'Input/3183.pickle'
outputFilename = '3183.csv'
assignRegion(True, filename, str(True)+outputFilename)
assignRegion(False, filename, str(False)+outputFilename)
'''Google Trends at city resolution associates tokens with city locations.
For each city, the city name and its coordinates are stored in the file "allCities.pickle".
Next we send each city name to Google Trends and use the top city result.
For example, 'chicago' is sent and the top city result from Google Trends is returned.
The coordinates of both the query city and the Google Trends city are known;
these coordinates are used to compute the distance in miles.
Over 4789 cities, the top city result from Google Trends is on average 362 miles away +/- 1335 miles.
So Google Trends is not the same as geocoding; but for a query such as Moscow, Google is able to
capture that this query is unlikely to be used by Russian speakers in Moscow, since they would
likely use the Cyrillic version.
The per-city comparison results are stored in: topCityAnalysis.csv'''
step3 = False
if step3:
formCityList('Input/3183.pickle') #forms list of cities from Google Trend Associations, stores into "allCities.pickle"
performCollection(True, "allCities.pickle") #Google Trends at city level
compareQueryCityLocationVsTopTrendingCityLocation()
|
apanasyu/GoogleTrends
|
Main.py
|
Main.py
|
py
| 20,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytrends.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytrends.request.TrendReq",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pytrends.request.build_payload",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pytrends.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pytrends.request.interest_by_region",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pytrends.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "pytrends.request.interest_by_region",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pytrends.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 424,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "geopy.distance.geodesic",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 461,
"usage_type": "call"
}
] |
37164877474
|
import torch
import numpy
import pandas
import sys
import os
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
#Global option defaults that can be changed later by command line
gcm_folder_path : str = "gcms"
target_folder_path : str = "targets"
class_index = "cat5"
use_cuda : bool = False
train_split: float = 0.8
test_split: float = 0.1
validation_split: float = 0.1
batch_size: int = 10
max_training_epochs: int = 200
CMD_HELP : str = """Options:
--cuda
uses nVidia CUDA acceleration for tensor calculations (recommended)
--batch-size <batch size>
sets the mini-batch size to use for training. Defaults to 10 if not supplied
--gcms-path <folder/directory path>
sets the path for the GCM CSV files to use as input. Defaults to ./gcms if not supplied
--targets-path <folder/directory path>
sets the path for the CSV files that contains the "cat5" class label column. Defaults to ./targets if not supplied. Note that a model is trained for each file that is found.
--classlabel <column name>
sets the name of the class label column to use. Defaults to "cat5" if not supplied
--validation-percentage <percentage>
sets the percentage of instances to use as the validation set
--test-percentage <percentage>
sets the percentage of instances to use as the final test set
--max-epochs <number of epochs>
sets the maximum number of training epochs. Defaults to 200 if not supplied
"""
torch.set_printoptions(precision = 10)
def normalise(t: torch.tensor):
max: float = t.max()
min: float = t.min()
t = ((t - min) / (max - min)) #implicit broadcasting applied on scalars
return t
def parse_command_line():
i = 1 #sys.argv[0] contains the script name itself and can be ignored
while i < len(sys.argv):
if sys.argv[i] == "-h" or sys.argv[i] == "--help":
print(CMD_HELP)
sys.exit()
elif sys.argv[i] == "--gcms-path":
i += 1
global gcm_folder_path
gcm_folder_path = sys.argv[i]
elif sys.argv[i] == "--classlabel":
i += 1
global class_index
class_index = sys.argv[i]
elif sys.argv[i] == "--cuda":
global use_cuda
use_cuda = True
elif sys.argv[i] == "--targets-path":
i += 1
global target_folder_path
target_folder_path = sys.argv[i]
elif sys.argv[i] == "--test-percentage":
i += 1
global test_split
test_percentage = float(sys.argv[i]) / 100.0
elif sys.argv[i] == "--validation-percentage":
i += 1
global validation_split
validation_percentage = float(sys.argv[i]) / 100.0
elif sys.argv[i] == "--batch-size":
i += 1
global batch_size
batch_size = int(sys.argv[i])
elif sys.argv[i] == "--max-epochs":
i += 1
global max_training_epochs
max_training_epochs = int(sys.argv[i])
else:
print("Unknown argument: " + sys.argv[i] + "\n Use \"gcm-cnn -h\" to see valid commands")
sys.exit()
i += 1
global train_split
train_split = 1.0 - test_split - validation_split
assert(train_split > 0), "No instances left for training. Did the sum of your test and validation holdout percentages exceed 100%?"
assert(batch_size > 0), "Batch size must be positive!"
def read_gcm_folder(path: str): #returns a folder of GCM CSVs as a 4-channel PyTorch Tensors
filenames = os.listdir(path)
files = []
for i in range(0, len(filenames)):
nextfile = pandas.read_csv((path + "/" + filenames[i]), sep=",", skiprows=3, header=None) #explicitly skip 3 rows to discard header, longitude, latitude
nextfile = nextfile.drop(nextfile.columns[0], axis=1)
nextfile = torch.from_numpy(nextfile.values).type(torch.FloatTensor)
if use_cuda == True:
nextfile = nextfile.cuda()
nextfile = nextfile.reshape(288,131,360)
nextfile = normalise(nextfile)
files.append(nextfile)
return torch.stack(files, dim=1)
def read_target_folder(path: str): #returns a folder of CSVs containing the class label as a list of PyTorch Tensors
filenames = os.listdir(path)
files = []
for i in range(0, len(filenames)):
nextfile = pandas.read_csv((path + "/" + filenames[i]), sep=",")
nextfile = nextfile[class_index] + 2
nextfile = torch.from_numpy(nextfile.values).type(torch.LongTensor)
if use_cuda == True:
nextfile = nextfile.cuda()
files.append(nextfile)
return files
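# Note: the "+ 2" shift above presumably remaps class labels from {-2..2}
# to {0..4}, which is what F.cross_entropy expects given the 5 output units
# of the network below (an assumption based on the cat5 naming).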
class Network(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=len(os.listdir(gcm_folder_path)), out_channels=6, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
self.fc1 = nn.Linear(in_features= 30276, out_features=120)
self.fc2 = nn.Linear(in_features=120, out_features=60)
self.out = nn.Linear(in_features=60, out_features=5)
#note hyperparameter choice is arbitrary except the initial in_channels and the final out_features
#they are dependent on the colour channels (3 since 3 GCMs) and output classes (5 since 5 classes in cat5) respectively
def forward(self, t):
# implement the forward pass
# (1) input layer
t = t #usually omitted since this is obviously trivial; size 360*131
# (2) hidden conv layer
t = self.conv1(t) #Haven't implemented wrapping - so after a 5x5 convolution, discard borders meaning feature maps are now 6 * 127 * 356 (Channels * height * width)
t = F.relu(t)
t = F.avg_pool2d(t, kernel_size=2, stride=2)
#pooling 2x2 with stride 2 - reduces to 6 * 63 * 178
# (3) hidden conv layer
t = self.conv2(t)
t = F.relu(t)
t = F.avg_pool2d(t, kernel_size=2, stride=2)
#pooling 2x2 with stride 2 - reduces to 12 * 29 * 87
# (4) hidden linear layer
t = t.reshape(-1, 12 * 29 * 87)
t = self.fc1(t)
t = F.relu(t)
# (5) hidden linear layer
t = self.fc2(t)
t = F.relu(t)
# (6) output layer
t = self.out(t)
#t = F.softmax(t, dim=1) #implicitly performed by F.cross_entropy()
return t
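# Shape trace for one 131x360 input with len(os.listdir(gcm_folder_path))
# channels (following the sizes in the comments above): conv1 -> 6x127x356,
# pool -> 6x63x178, conv2 -> 12x59x174, pool -> 12x29x87,
# flatten -> 12*29*87 = 30276, matching fc1's in_features.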
#Setting options from command line
parse_command_line()
#print(target_tensors[0].size()[0])
#Reading files from disk into PyTorch tensors
label_tensors = read_target_folder(target_folder_path)
gcm_tensor = read_gcm_folder(gcm_folder_path)
#Split the gcm_tensor into train, validation, test tensors
instances = gcm_tensor.size()[0]
train_tensor = gcm_tensor[:int(instances * train_split)] #note int() truncates/floors
validation_tensor = gcm_tensor[int(instances * train_split):int(instances * (train_split + validation_split))]
test_tensor = gcm_tensor[int(instances * (train_split + validation_split)):]
#Now we set up a loop to train a network for each label file that was present
for n in range(0, len(label_tensors)):
#Creating pytorch dataset and dataloader for easy access to minibatch sampling without replacement in randomnised order
train_set = torch.utils.data.TensorDataset(train_tensor, (label_tensors[n])[ : int(instances * train_split)])
train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle=True)
validation_set = torch.utils.data.TensorDataset(validation_tensor, (label_tensors[n])[int(instances * train_split) : int(instances * (train_split + validation_split))])
validation_loader = torch.utils.data.DataLoader(validation_set, batch_size=validation_tensor.size()[0], shuffle = False)
test_set = torch.utils.data.TensorDataset(test_tensor, (label_tensors[n])[int(instances * (train_split + validation_split)) : ])
test_loader = torch.utils.data.DataLoader(test_set, batch_size = test_tensor.size()[0], shuffle = False)
#Initialising the CNN and gradient descender (optimizer)
network = Network()
if use_cuda == True:
network = network.cuda()
optimizer = optim.SGD(network.parameters(), lr = 0.01)
#running the training loop
epoch_correct : int = 0
epoch_loss : float = 0
lowest_valid_loss : float = float('inf')
epochs_without_improvement = 0
best_network = copy.deepcopy(network)
print("results for", os.listdir(target_folder_path)[n])
for epoch in range(0, max_training_epochs):
previous_epoch_loss = epoch_loss
epoch_correct = 0
epoch_loss = 0
for images, labels in train_loader:
#Getting predictions before any training on this batch has occurred
predictions = network(images)
loss = F.cross_entropy(predictions, labels)
#making the gradient step for this batch
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_correct += predictions.argmax(dim=1).eq(labels).int().sum().item()
epoch_loss += loss.item()
valid_preds = network(validation_tensor)
valid_loss = F.cross_entropy(valid_preds, label_tensors[n][int(instances * train_split) : int(instances * (train_split + validation_split))])
if (lowest_valid_loss > valid_loss) :
lowest_valid_loss = valid_loss
best_network = copy.deepcopy(network)
epochs_without_improvement = 0
else:
epochs_without_improvement += 1
if (epochs_without_improvement > 10) :
print("stopping early")
break
print("epoch: ", epoch, "\ttrain_loss: ", round(epoch_loss, 5), "\ttrain_correct: ", epoch_correct, "\tvalidation_loss: ", round(valid_loss.item(),5), sep='' )
test_preds = best_network(test_tensor)
test_loss = F.cross_entropy(test_preds, label_tensors[n][int(instances * (train_split + validation_split)) : ])
test_correct = test_preds.argmax(dim=1).eq(label_tensors[n][int(instances * (train_split + validation_split)) : ]).int().sum().item()
print("test_correct: ", test_correct, "/", test_preds.size()[0], "\ttest_loss: ", round(test_loss.item(), 5), sep='' )
|
tigerwxu/gcm-cnn
|
gcm-cnn.py
|
gcm-cnn.py
|
py
| 10,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.set_printoptions",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "torch.stack",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 247,
"usage_type": "name"
}
] |
36066284113
|
#%%
from PIL import Image
import numpy as np
import onnxruntime
import torch
import cv2
def preprocess_image(image_path, height, width, channels=3):
image = Image.open(image_path)
image = image.resize((width, height), Image.LANCZOS)
image_data = np.asarray(image).astype(np.float32)
image_data = image_data.transpose([2, 0, 1]) # transpose to CHW
mean = np.array([0.079, 0.05, 0]) + 0.406
std = np.array([0.005, 0, 0.001]) + 0.224
for channel in range(image_data.shape[0]):
image_data[channel, :, :] = (image_data[channel, :, :] / 255 - mean[channel]) / std[channel]
image_data = np.expand_dims(image_data, 0)
return image_data
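# Note: the mean/std offsets above reduce to the standard ImageNet statistics
# (mean 0.485/0.456/0.406, std 0.229/0.224/0.225); the returned array is a
# float32 NCHW batch of shape (1, 3, height, width).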
#%%
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
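# e.g. softmax(np.array([1.0, 2.0, 3.0])) ~ [0.090, 0.245, 0.665]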
def run_sample(session, image_file, categories):
output = session.run([], {'input':preprocess_image(image_file, 224, 224)})[0]
output = output.flatten()
output = softmax(output) # this is optional
top5_catid = np.argsort(-output)[:5]
for catid in top5_catid:
print(categories[catid], output[catid])
# write the result to a file
with open("result.txt", "w") as f:
for catid in top5_catid:
f.write(categories[catid] + " " + str(output[catid]) + "\n")
#%%
# create main function
if __name__ == "__main__":
# Read the categories
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Create Inference Session
session = onnxruntime.InferenceSession("mobilenet_v2_float.onnx")
# get image from camera
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
# capture image from camera
ret, frame = cap.read()
frame = cv2.flip(frame, -1) # Flip camera vertically
cv2.imwrite('capture.jpg', frame)
cap.release()
cv2.destroyAllWindows()
run_sample(session, 'capture.jpg', categories)
# %%
|
cassiebreviu/onnxruntime-raspberrypi
|
inference_mobilenet.py
|
inference_mobilenet.py
|
py
| 2,000 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.LANCZOS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "onnxruntime.InferenceSession",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 61,
"usage_type": "call"
}
] |
30117142033
|
import numpy as np
from PIL import Image
class predict_day_night_algos:
def __init__(self,img_path,algorithm_choice):
self.img_path = img_path
self.algorithm_choice = algorithm_choice
def select_algorithm(self):
"""
the function selects which algorithm,
based on the user input
"""
algo_choices = ["intensity_based","pixel_based"]
if algo_choices[self.algorithm_choice] == "intensity_based":
print("Using Intensity based method")
intensity_value = self.intensity_algorithm()
if intensity_value >= 0.35:
return "day"
else:
return "night"
elif algo_choices[self.algorithm_choice] == "pixel_based":
print("Using pixel based method")
percentage_darker_pixels = self.pixel_percentage_algorithm()
if percentage_darker_pixels > 0.75:
return "night"
else:
return "day"
def intensity_algorithm(self):
"""
description :the function calculates the intensity based on HSI model,
intensity = (R+G+B)/3, where R,G,B are all normalised arrays/bands
input params : the image path
return : intensity value of the image(single value)
"""
### Reading the images ####
img = Image.open(self.img_path)
###converting to numpy array###
arr = np.array(img)
###normalising the bands individually###
Rn,Gn,Bn = (arr[:,:,0]/255),(arr[:,:,1]/255),(arr[:,:,2]/255)
###calculating the Intensity based on HSI model####
intensity_arr = (Rn+Gn+Bn)/3
#### taking average of the intensity array based on number of pixels in the intensity array ##
intensity_value = np.sum(intensity_arr)/(intensity_arr.shape[0]*intensity_arr.shape[1])
return intensity_value
def pixel_percentage_algorithm(self):
"""
description : this function calculates the percentage of darker pixels;
the more darker pixels an image has, the darker its overall intensity.
input params : the image path
return : percentage of darker pixels
"""
### Reading the images ####
img = Image.open(self.img_path)
###converting to numpy array###
arr = np.array(img)
### Count the pixels with values in the range 0--40; pixels in this range indicate darker intensity ###
values, counts = np.unique(arr, return_counts=True)
num_darker_pixels = np.sum(counts[values < 40])
###Calculating the percentage ####
percentage_darker_pixels = (num_darker_pixels)/(arr.shape[0]*arr.shape[1]*arr.shape[2])
##### Rounding the percentage value #####
percentage_darker_pixels = round(percentage_darker_pixels,2)
return percentage_darker_pixels
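# Illustrative usage (the image path is a placeholder):
#   predictor = predict_day_night_algos("frame.jpg", 0)  # 0 -> intensity_based, 1 -> pixel_based
#   print(predictor.select_algorithm())  # "day" or "night"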
|
shivargha98/shivargha_bandopadhyay
|
predict_day_night.py
|
predict_day_night.py
|
py
| 2,888 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 66,
"usage_type": "call"
}
] |
30669751378
|
import os
import cv2
dir = "/Users/sunxiaofei/PycharmProjects/remote-server-projects/unlabeled_dataset/data"
for i, eachVid in enumerate(os.listdir(dir)):
vPath = os.path.join(dir, eachVid)
vname = vPath.split("/")[-1][:-4]
print(vname)
print(vPath)
vidcap = cv2.VideoCapture(vPath)
success,image = vidcap.read()
count = 0
valid_count = 0
save_path = "./pic_data/"+vname
if not os.path.exists(save_path):
os.makedirs(save_path)
while success:
if count%40==0:
valid_count += 1
cv2.imwrite("./pic_data/"+vname+"/"+str(valid_count)+".jpg", image) # save frame as JPEG file
success,image = vidcap.read()
print('Read a new frame: ', success)
count += 1
vidcap.release() # free the capture handle before moving on to the next video
|
sxfduter/python_utils
|
video_frame_extraction.py
|
video_frame_extraction.py
|
py
| 709 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 21,
"usage_type": "call"
}
] |
4828707472
|
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from models import Quote, Title, Year
from schemas import QuoteBase, QuoteCreate, TitleBase, TitleCreate, YearBase, YearCreate
import random
import auth
import models
import schemas
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def create_quote(db: Session, quote: QuoteCreate, title_text: str, year_text: str):
db_title = db.query(Title).filter(Title.text == title_text).first()
if not db_title:
db_title = Title(text=title_text)
db.add(db_title)
db.commit()
db.refresh(db_title)
db_year = db.query(Year).filter(Year.text == year_text).first()
if not db_year:
db_year = Year(text=year_text)
db.add(db_year)
db.commit()
db.refresh(db_year)
db_quote = Quote(text=quote.text, name=db_title, periode=db_year)
db.add(db_quote)
db.commit()
db.refresh(db_quote)
return db_quote
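# Note: create_quote uses a get-or-create pattern for Title and Year, so
# repeated titles/years are deduplicated before the quote row is inserted.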
#get quote by id
def get_quote(db: Session, quote_id: int):
return db.query(Quote).filter(Quote.id == quote_id).first()
#get a random quote with an id between the first and the tenth
def get_quote_random(db:Session):
id = random.randint(1, 10)
return db.query(Quote).filter(Quote.id == id).first()
#update quote by id
def update_quote(db: Session, quote_id: int, quote: QuoteBase):
db_quote = db.query(Quote).filter(Quote.id == quote_id).first()
db_quote.text = quote.text
db.commit()
db.refresh(db_quote)
return db_quote
#delete quote by id
def delete_quote(db: Session, quote_id: int):
db_quote = db.query(Quote).filter(Quote.id == quote_id).first()
db.delete(db_quote)
db.commit()
return {"message": "Quote deleted"}
#
def get_title(db: Session, title_id: int):
return db.query(Title).filter(Title.id == title_id).first()
def delete_title(db: Session, title_id: int):
db_title = db.query(Title).filter(Title.id == title_id).first()
db.delete(db_title)
db.commit()
return {"message": "Title deleted"}
def get_year(db: Session, year_id: int):
return db.query(Year).filter(Year.id == year_id).first()
def delete_year(db: Session, year_id: int):
db_year = db.query(Year).filter(Year.id == year_id).first()
db.delete(db_year)
db.commit()
return {"message": "Year deleted"}
def get_all_quotes(db: Session,skip:int=0,limit:int=50):
all_quotes = db.query(models.Quote).offset(skip).limit(limit).all()
return all_quotes
def get_all_titles(db: Session):
return db.query(Title).all()
def get_all_years(db: Session):
return db.query(Year).all()
# create admin
def create_admin(db: Session, admin: schemas.AdminCreate):
hashed_password = auth.get_password_hash(admin.password)
db_admin = models.Admin(username=admin.username, hashed_password=hashed_password)
adminexists = db.query(models.Admin).filter(models.Admin.username == admin.username).first()
if adminexists:
adminerror = {
"username": "error",
"id": 0,
}
return adminerror
else:
db.add(db_admin)
db.commit()
db.refresh(db_admin)
return db_admin
# get admin by username
def get_admin_username(db: Session, username: str):
admin = db.query(models.Admin).filter(models.Admin.username == username).first()
return admin
# delete admin by username
def delete_admin(db: Session, admin: schemas.Admin):
admin = db.query(models.Admin).filter(models.Admin.username == admin.username).first()
db.delete(admin)
db.commit()
return admin
|
rubenpinxten/herexamen_API
|
myProject/crud.py
|
crud.py
|
py
| 3,543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.security.OAuth2PasswordBearer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "schemas.QuoteCreate",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Title",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "models.Title.text",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Title",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Year",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "models.Year.text",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "models.Year",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Quote",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "models.Quote",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "models.Quote.id",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "models.Quote",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "models.Quote.id",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "schemas.QuoteBase",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.Quote",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "models.Quote.id",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "models.Quote",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "models.Quote.id",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "models.Title",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "models.Title.id",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "models.Title",
"line_number": 63,
"usage_type": "argument"
},
{
"api_name": "models.Title.id",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "models.Year",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "models.Year.id",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "models.Year",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "models.Year.id",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "models.Quote",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "models.Title",
"line_number": 82,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "models.Year",
"line_number": 85,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "schemas.AdminCreate",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "auth.get_password_hash",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "models.Admin",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "models.Admin",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "models.Admin",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "schemas.Admin",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "models.Admin",
"line_number": 111,
"usage_type": "attribute"
}
] |
42319245603
|
from setuptools import setup, find_packages
import codecs
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
import prefetch_generator
# use the package's own module docstring as the long description (no README file is read)
long_description = prefetch_generator.__doc__
version_string = '1.0.2'
setup(
name="prefetch_generator",
version=version_string,
description="a simple tool to compute arbitrary generator in a background thread",
long_description=long_description,
# Author details
author_email="[email protected]",
url="https://github.com/justheuristic/prefetch_generator",
# Choose your license
license='The Unlicense',
packages=find_packages(),
classifiers=[
# Indicate who your project is intended for
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: The Unlicense (Unlicense)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
# What does your project relate to?
    keywords='background generator, prefetch generator, parallel generator, prefetch, background, ' + \
             'deep learning, theano, tensorflow, lasagne, blocks',
# List run-time dependencies here. These will be installed by pip when your project is installed.
install_requires=[
#nothing
],
)
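# Illustrative use of the packaged API (a sketch, not part of the build script;
# BackgroundGenerator is the package's main entry point):
#
#     from prefetch_generator import BackgroundGenerator
#     for batch in BackgroundGenerator(my_data_loader(), max_prefetch=1):
#         train_on(batch)  # the next batch is computed in a background thread
#
# my_data_loader and train_on are hypothetical placeholders.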
|
justheuristic/prefetch_generator
|
setup.py
|
setup.py
|
py
| 1,969 |
python
|
en
|
code
| 260 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "prefetch_generator.__doc__",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 26,
"usage_type": "call"
}
] |
29457712632
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''translate.translate: provides main() entry point.'''
__version__ = '0.1.3'
import logging
import argparse
import requests
from bs4 import BeautifulSoup
from terminaltables import AsciiTable
logging.basicConfig(
filename = '.log',
filemode = 'a+',
level = logging.INFO,
format = '%(asctime)s | %(levelname)s | %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S'
)
def main():
''' Parse the arguments and start running what needs to be running '''
parser = argparse.ArgumentParser()
parser.add_argument(
'dictionary', nargs='?', type=str, default='',
        help='Dictionary to use for translation. To translate from English to French, it should take the value enfr; for English to Italian, enit; etc.'
)
parser.add_argument(
'word', nargs='?', type=str, default='',
help='Word to be translated'
)
parser.add_argument(
'-l', '--list', action='store_true',
help='Returns the list of available dictionaries.'
)
args = parser.parse_args()
logging.info('Arguments parsed')
dictionaries = get_dictionaries()
if args.list:
logging.info('Attempting to print the list of available dictionaries')
print('')
print('**** Available dictionaries:')
print(dictionaries.table)
logging.info('Printed the list of available dictionaries')
if args.word and args.dictionary:
translate_word(args.dictionary, args.word)
else:
if not args.list:
logging.info('User didn\'t pass the correct arguments. Displaying the help message and shutting down')
print('Please enter a dictionary and a word.')
print('\tEnter -l or --list to get a list of all available dictionaries.')
print('Enter -h or --help for help.')
def get_dictionaries():
'''
    Requests the wordreference.com homepage and parses the list of available
    dictionaries.
'''
url = 'http://www.wordreference.com'
logging.info('Requesting {} for parsing'.format(url))
r = requests.get(url)
if r.status_code != 200:
logging.info('Request failed with status {}'.format(r.status_code))
return -1
logging.info('Request for {} successful'.format(url))
logging.info('Attempting to parse the html and extract the list of dictionaries')
soup = BeautifulSoup(r.content, 'html.parser')
options = soup.find_all('option')
dictionaries = [ ['Key', 'Dictionary'] ]
dictionaries += [ [option['id'], option.get_text()] for option in options
if option['id'][:2] != option['id'][2:4] # No definition option
and len(option['id']) == 4 # No synonyms or conjugation option
]
logging.info('List of dictionaries extracted')
table = AsciiTable(dictionaries)
return table
def translate_word(dictionary, word):
'''
Requests the page for the translation of "word" using the dictionary
"dictionary".
Print a formatted version of the response
'''
    # Initial checks
if not isinstance(dictionary, str) or len(dictionary) != 4:
raise TypeError('''The "dictionary" argument must be a string of length 4,
with the first two letters being the acronym of the original
language, and the last two letters, the acronym of the language
you would like to translate to.''')
if not isinstance(word, str):
raise TypeError('The "word" argument must be a string (type {} passed)'.format(type(word)))
# Building the url (and formatting it) and get the html from GET
base_url = 'http://www.wordreference.com/'
url = base_url + dictionary + '/' + word.replace(' ', '%20')
logging.info('Requesting {} for parsing'.format(url))
r = requests.get(url)
if r.status_code != 200:
logging.info('Request failed with status {}'.format(r.status_code))
return -1
logging.info('Request for {} successful'.format(url))
# Parsing the html to extract the data
# I kept it to what matters:
# * Original word/expression
# * Translation
# Because who really cares if it is an intransitive verb or a noun?
logging.info('Attempting to parse the html and extract the translations')
soup = BeautifulSoup(r.content, 'html.parser')
table_single_form = soup.find_all('table', {'class': 'WRD'})[0]
try:
data_single_form = parse_translation_table(table_single_form)
except IndexError:
logging.warning('The word passed doesn\'t have any translation')
return -1
logging.info('Translations extracted')
# print the results in a pretty way
print_results(word, data_single_form)
def parse_translation_table(table):
'''
Given the table of translations extracted with BeautifulSoup, returns
a list of lists containing the various translations.
'''
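    # Illustrative shape of the returned value (the words here are hypothetical):
    #   [['Original Language', 'Translation'],
    #    ['hello', 'bonjour\nsalut']]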
data = [ ['Original Language', 'Translation'] ]
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) == 3:
if cells[2].em is None:
continue
cells[2].em.decompose()
if cells[0].get_text(strip=True) == '':
data[-1][1] += u'\n{}'.format(cells[2].get_text())
else:
data += [[
cells[0].find('strong').get_text(),
cells[2].get_text()
]]
return data
def print_results(word, data_single_form):
''' Pretty print of the translation results '''
print('')
print('**** Translations for {}:'.format(word))
print(AsciiTable(data_single_form).table)
print('')
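# Hypothetical direct-run entry point (a sketch; the installed package most
# likely wires main() up through a console_scripts entry point instead):
if __name__ == '__main__':
    main()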
|
alvarolopez/translate-term
|
translate/translate.py
|
translate.py
|
py
| 5,821 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "terminaltables.AsciiTable",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "terminaltables.AsciiTable",
"line_number": 167,
"usage_type": "call"
}
] |
15512669243
|
import pygame  # import pygame
# Define a function that draws the grid
def draw_grid(screen, ruudu_suurus, read, veerud, joone_värv):
    for i in range(read):  # first loop, iterating over all rows
        for j in range(veerud):  # second loop, iterating over all columns
            rect = pygame.Rect(j * ruudu_suurus, i * ruudu_suurus, ruudu_suurus, ruudu_suurus)  # create a Rect object (x coordinate, y coordinate, width and height)
            pygame.draw.rect(screen, joone_värv, rect, 1)  # draw the cell (screen, line color, rect and line width)
# Create the Pygame screen
pygame.init()  # initialize pygame
screen = pygame.display.set_mode((640, 480))  # set the window size
pygame.display.set_caption("Ruudustik")  # set the title of the current window
# Set the parameters
ruudu_suurus = 20  # square size
read = 24  # number of rows
veerud = 32  # number of columns
joone_värv = (255, 0, 0)  # line color
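# Note: veerud * ruudu_suurus = 32 * 20 = 640 and read * ruudu_suurus = 24 * 20 = 480,
# so the grid exactly fills the 640x480 window.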
# Allow closing the window via the X button
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # Draw the grid of squares on the screen
    screen.fill((150, 255, 150))  # green background color
    draw_grid(screen, ruudu_suurus, read, veerud, joone_värv)  # draw the grid on the screen
    pygame.display.update()  # update the screen
# Shut down Pygame
pygame.quit()
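# An equivalent grid could be drawn with two passes of pygame.draw.line instead
# of per-cell rects (an illustrative sketch, not part of the original exercise):
#
#     for x in range(0, veerud * ruudu_suurus + 1, ruudu_suurus):
#         pygame.draw.line(screen, joone_värv, (x, 0), (x, read * ruudu_suurus))
#     for y in range(0, read * ruudu_suurus + 1, ruudu_suurus):
#         pygame.draw.line(screen, joone_värv, (0, y), (veerud * ruudu_suurus, y))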
|
KermoV/Ulesanne_3
|
Ülesanne_3.py
|
Ülesanne_3.py
|
py
| 1,403 |
python
|
et
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.Rect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 35,
"usage_type": "call"
}
] |
14374871985
|
# coding=utf-8
"""Unit tests for activitypub.py."""
from base64 import b64encode
import copy
from datetime import datetime, timedelta
from hashlib import sha256
import logging
from unittest import skip
from unittest.mock import patch
from flask import g
from google.cloud import ndb
from granary import as2, microformats2
from httpsig import HeaderSigner
from oauth_dropins.webutil.testutil import requests_response
from oauth_dropins.webutil.util import json_dumps, json_loads
import requests
from urllib3.exceptions import ReadTimeoutError
from werkzeug.exceptions import BadGateway
# import first so that Fake is defined before URL routes are registered
from .testutil import Fake, TestCase
import activitypub
from activitypub import ActivityPub, postprocess_as2
import common
from models import Follower, Object
import protocol
from web import Web
# have to import module, not attrs, to avoid circular import
from . import test_web
ACTOR = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/swentel',
'type': 'Person',
'inbox': 'http://mas.to/inbox',
'name': 'Mrs. ☕ Foo',
'icon': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
'image': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
}
ACTOR_BASE = {
'@context': [
'https://www.w3.org/ns/activitystreams',
'https://w3id.org/security/v1',
],
'type': 'Person',
'id': 'http://localhost/user.com',
'url': 'http://localhost/r/https://user.com/',
'preferredUsername': 'user.com',
'summary': '',
'inbox': 'http://localhost/user.com/inbox',
'outbox': 'http://localhost/user.com/outbox',
'following': 'http://localhost/user.com/following',
'followers': 'http://localhost/user.com/followers',
'endpoints': {
'sharedInbox': 'http://localhost/ap/sharedInbox',
},
'publicKey': {
'id': 'http://localhost/user.com#key',
'owner': 'http://localhost/user.com',
'publicKeyPem': 'populated in setUp()',
},
}
ACTOR_BASE_FULL = {
**ACTOR_BASE,
'name': 'Ms. ☕ Baz',
'attachment': [{
'name': 'Web site',
'type': 'PropertyValue',
'value': '<a rel="me" href="https://user.com/"><span class="invisible">https://</span>user.com<span class="invisible">/</span></a>',
}],
}
REPLY_OBJECT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Note',
'content': 'A ☕ reply',
'id': 'http://mas.to/reply/id',
'url': 'http://mas.to/reply',
'inReplyTo': 'https://user.com/post',
'to': [as2.PUBLIC_AUDIENCE],
}
REPLY_OBJECT_WRAPPED = copy.deepcopy(REPLY_OBJECT)
REPLY_OBJECT_WRAPPED['inReplyTo'] = 'http://localhost/r/https://user.com/post'
REPLY = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/reply/as2',
'object': REPLY_OBJECT,
}
NOTE_OBJECT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Note',
'content': '☕ just a normal post',
'id': 'http://mas.to/note/id',
'url': 'http://mas.to/note',
'to': [as2.PUBLIC_AUDIENCE],
'cc': [
'https://mas.to/author/followers',
'https://masto.foo/@other',
'http://localhost/target', # redirect-wrapped
],
}
NOTE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/note/as2',
'actor': 'https://masto.foo/@author',
'object': NOTE_OBJECT,
}
MENTION_OBJECT = copy.deepcopy(NOTE_OBJECT)
MENTION_OBJECT.update({
'id': 'http://mas.to/mention/id',
'url': 'http://mas.to/mention',
'tag': [{
'type': 'Mention',
'href': 'https://masto.foo/@other',
'name': '@[email protected]',
}, {
'type': 'Mention',
'href': 'http://localhost/tar.get', # redirect-wrapped
'name': '@[email protected]',
}],
})
MENTION = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/mention/as2',
'object': MENTION_OBJECT,
}
# based on example Mastodon like:
# https://github.com/snarfed/bridgy-fed/issues/4#issuecomment-334212362
# (reposts are very similar)
LIKE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://mas.to/like#ok',
'type': 'Like',
'object': 'https://user.com/post',
'actor': 'https://mas.to/actor',
}
LIKE_WRAPPED = copy.deepcopy(LIKE)
LIKE_WRAPPED['object'] = 'http://localhost/r/https://user.com/post'
LIKE_ACTOR = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/actor',
'type': 'Person',
'name': 'Ms. Actor',
'preferredUsername': 'msactor',
'icon': {'type': 'Image', 'url': 'https://user.com/pic.jpg'},
'image': [
{'type': 'Image', 'url': 'https://user.com/thumb.jpg'},
{'type': 'Image', 'url': 'https://user.com/pic.jpg'},
],
}
LIKE_WITH_ACTOR = {
**LIKE,
'actor': LIKE_ACTOR,
}
# repost, should be delivered to followers if object is a fediverse post,
# translated to webmention if object is an indieweb post
REPOST = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/alice/statuses/654/activity',
'type': 'Announce',
'actor': ACTOR['id'],
'object': NOTE_OBJECT['id'],
'published': '2023-02-08T17:44:16Z',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}
REPOST_FULL = {
**REPOST,
'actor': ACTOR,
'object': NOTE_OBJECT,
}
FOLLOW = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/6d1a',
'type': 'Follow',
'actor': ACTOR['id'],
'object': 'https://user.com/',
}
FOLLOW_WRAPPED = copy.deepcopy(FOLLOW)
FOLLOW_WRAPPED['object'] = 'http://localhost/user.com'
FOLLOW_WITH_ACTOR = copy.deepcopy(FOLLOW)
FOLLOW_WITH_ACTOR['actor'] = ACTOR
FOLLOW_WRAPPED_WITH_ACTOR = copy.deepcopy(FOLLOW_WRAPPED)
FOLLOW_WRAPPED_WITH_ACTOR['actor'] = ACTOR
FOLLOW_WITH_OBJECT = copy.deepcopy(FOLLOW)
FOLLOW_WITH_OBJECT['object'] = ACTOR
ACCEPT_FOLLOW = copy.deepcopy(FOLLOW_WITH_ACTOR)
del ACCEPT_FOLLOW['@context']
del ACCEPT_FOLLOW['actor']['@context']
ACCEPT_FOLLOW['actor']['image'] = {'type': 'Image', 'url': 'https://user.com/me.jpg'}
ACCEPT_FOLLOW['object'] = 'http://localhost/user.com'
ACCEPT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Accept',
'id': 'http://localhost/web/user.com/followers#accept-https://mas.to/6d1a',
'actor': 'http://localhost/user.com',
'object': {
**ACCEPT_FOLLOW,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
},
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}
UNDO_FOLLOW_WRAPPED = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/6d1b',
'type': 'Undo',
'actor': 'https://mas.to/users/swentel',
'object': FOLLOW_WRAPPED,
}
DELETE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/swentel#delete',
'type': 'Delete',
'actor': 'https://mas.to/users/swentel',
'object': 'https://mas.to/users/swentel',
}
UPDATE_PERSON = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://a/person#update',
'type': 'Update',
'actor': 'https://mas.to/users/swentel',
'object': {
'type': 'Person',
'id': 'https://a/person',
},
}
UPDATE_NOTE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://a/note#update',
'type': 'Update',
'actor': 'https://mas.to/users/swentel',
'object': {
'type': 'Note',
'id': 'https://a/note',
},
}
WEBMENTION_DISCOVERY = requests_response(
'<html><head><link rel="webmention" href="/webmention"></html>')
HTML = requests_response('<html></html>', headers={
'Content-Type': common.CONTENT_TYPE_HTML,
})
HTML_WITH_AS2 = requests_response("""\
<html><meta>
<link href='http://as2' rel='alternate' type='application/activity+json'>
</meta></html>
""", headers={
'Content-Type': common.CONTENT_TYPE_HTML,
})
AS2_OBJ = {'foo': ['bar']}
AS2 = requests_response(AS2_OBJ, headers={
'Content-Type': as2.CONTENT_TYPE,
})
NOT_ACCEPTABLE = requests_response(status=406)
@patch('requests.post')
@patch('requests.get')
@patch('requests.head')
class ActivityPubTest(TestCase):
def setUp(self):
super().setUp()
self.request_context.push()
self.user = self.make_user('user.com', has_hcard=True, has_redirects=True,
obj_as2={**ACTOR, 'id': 'https://user.com/'})
self.swentel_key = ndb.Key(ActivityPub, 'https://mas.to/users/swentel')
self.masto_actor_key = ndb.Key(ActivityPub, 'https://mas.to/actor')
ACTOR_BASE['publicKey']['publicKeyPem'] = self.user.public_pem().decode()
self.key_id_obj = Object(id='http://my/key/id', as2={
**ACTOR,
'publicKey': {
'id': 'http://my/key/id#unused',
'owner': 'http://own/er',
'publicKeyPem': self.user.public_pem().decode(),
},
})
self.key_id_obj.put()
def assert_object(self, id, **props):
props.setdefault('delivered_protocol', 'web')
return super().assert_object(id, **props)
def sign(self, path, body):
"""Constructs HTTP Signature, returns headers."""
digest = b64encode(sha256(body.encode()).digest()).decode()
headers = {
'Date': 'Sun, 02 Jan 2022 03:04:05 GMT',
'Host': 'localhost',
'Content-Type': as2.CONTENT_TYPE,
'Digest': f'SHA-256={digest}',
}
hs = HeaderSigner('http://my/key/id#unused', self.user.private_pem().decode(),
algorithm='rsa-sha256', sign_header='signature',
headers=('Date', 'Host', 'Digest', '(request-target)'))
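        # The signature covers Date, Host, Digest, and the request target, so
        # tampering with any of them invalidates it (exercised in
        # test_inbox_verify_http_signature below).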
return hs.sign(headers, method='POST', path=path)
def post(self, path, json=None):
"""Wrapper around self.client.post that adds signature."""
body = json_dumps(json)
return self.client.post(path, data=body, headers=self.sign(path, body))
def test_actor_fake(self, *_):
self.make_user('user.com', cls=Fake, obj_as2={
'type': 'Person',
'id': 'https://user.com/',
})
got = self.client.get('/ap/fake/user.com')
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
type = got.headers['Content-Type']
self.assertTrue(type.startswith(as2.CONTENT_TYPE), type)
self.assertEqual({
'@context': ['https://w3id.org/security/v1'],
'type': 'Person',
'id': 'http://bf/fake/user.com/ap',
'preferredUsername': 'user.com',
'url': 'http://localhost/r/user.com',
'summary': '',
'inbox': 'http://bf/fake/user.com/ap/inbox',
'outbox': 'http://bf/fake/user.com/ap/outbox',
'following': 'http://bf/fake/user.com/ap/following',
'followers': 'http://bf/fake/user.com/ap/followers',
'endpoints': {'sharedInbox': 'http://localhost/ap/sharedInbox'},
'publicKey': {
'id': 'http://localhost/user.com#key',
'owner': 'http://localhost/user.com',
'publicKeyPem': self.user.public_pem().decode(),
},
}, got.json)
def test_actor_web(self, *_):
"""Web users are special cased to drop the /web/ prefix."""
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
type = got.headers['Content-Type']
self.assertTrue(type.startswith(as2.CONTENT_TYPE), type)
self.assertEqual({
**ACTOR_BASE,
'name': 'Mrs. ☕ Foo',
'icon': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
'image': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
}, got.json)
def test_actor_blocked_tld(self, _, __, ___):
got = self.client.get('/foo.json')
self.assertEqual(404, got.status_code)
def test_actor_new_user_fetch(self, _, mock_get, __):
self.user.obj_key.delete()
self.user.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response(test_web.ACTOR_HTML)
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
self.assert_equals(ACTOR_BASE_FULL, got.json, ignore=['publicKeyPem'])
def test_actor_new_user_fetch_no_mf2(self, _, mock_get, __):
self.user.obj_key.delete()
self.user.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response('<html></html>')
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
self.assert_equals(ACTOR_BASE, got.json, ignore=['publicKeyPem'])
def test_actor_new_user_fetch_fails(self, _, mock_get, __):
mock_get.side_effect = ReadTimeoutError(None, None, None)
got = self.client.get('/nope.com')
self.assertEqual(504, got.status_code)
def test_individual_inbox_no_user(self, mock_head, mock_get, mock_post):
self.user.key.delete()
mock_get.side_effect = [self.as2_resp(LIKE_ACTOR)]
reply = {
**REPLY,
'actor': LIKE_ACTOR,
}
self._test_inbox_reply(reply, mock_head, mock_get, mock_post)
self.assert_user(ActivityPub, 'https://mas.to/actor',
obj_as2=LIKE_ACTOR, direct=True)
def test_inbox_activity_without_id(self, *_):
note = copy.deepcopy(NOTE)
del note['id']
resp = self.post('/ap/sharedInbox', json=note)
self.assertEqual(400, resp.status_code)
def test_inbox_reply_object(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY_OBJECT, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1(REPLY_OBJECT),
type='comment')
# auto-generated post activity
self.assert_object(
'http://mas.to/reply/id#bridgy-fed-create',
source_protocol='activitypub',
our_as1={
**as2.to_as1(REPLY),
'id': 'http://mas.to/reply/id#bridgy-fed-create',
'published': '2022-01-02T03:04:05+00:00',
},
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def test_inbox_reply_object_wrapped(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY_OBJECT_WRAPPED, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1(REPLY_OBJECT),
type='comment')
# auto-generated post activity
self.assert_object(
'http://mas.to/reply/id#bridgy-fed-create',
source_protocol='activitypub',
our_as1={
**as2.to_as1(REPLY),
'id': 'http://mas.to/reply/id#bridgy-fed-create',
'published': '2022-01-02T03:04:05+00:00',
},
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def test_inbox_reply_create_activity(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1({
**REPLY_OBJECT,
'author': None,
}),
type='comment')
# sent activity
self.assert_object(
'http://mas.to/reply/as2',
source_protocol='activitypub',
as2=REPLY,
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def _test_inbox_reply(self, reply, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/post')
mock_get.side_effect = (
(list(mock_get.side_effect) if mock_get.side_effect else [])
+ [
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
WEBMENTION_DISCOVERY,
])
mock_post.return_value = requests_response()
got = self.post('/ap/web/user.com/inbox', json=reply)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
self.assert_req(mock_get, 'https://user.com/post')
convert_id = reply['id'].replace('://', ':/')
if reply['type'] != 'Create':
convert_id += '%23bridgy-fed-create'
self.assert_req(
mock_post,
'https://user.com/webmention',
headers={'Accept': '*/*'},
allow_redirects=False,
data={
'source': f'https://ap.brid.gy/convert/web/{convert_id}',
'target': 'https://user.com/post',
},
)
def test_inbox_reply_to_self_domain(self, *mocks):
self._test_inbox_ignore_reply_to('http://localhost/mas.to', *mocks)
def test_inbox_reply_to_in_blocklist(self, *mocks):
self._test_inbox_ignore_reply_to('https://twitter.com/foo', *mocks)
def _test_inbox_ignore_reply_to(self, reply_to, mock_head, mock_get, mock_post):
reply = copy.deepcopy(REPLY_OBJECT)
reply['inReplyTo'] = reply_to
mock_head.return_value = requests_response(url='http://mas.to/')
mock_get.side_effect = [
# actor fetch
self.as2_resp(ACTOR),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
]
got = self.post('/user.com/inbox', json=reply)
self.assertEqual(204, got.status_code, got.get_data(as_text=True))
mock_post.assert_not_called()
def test_individual_inbox_create_obj(self, *mocks):
self._test_inbox_create_obj('/user.com/inbox', *mocks)
def test_shared_inbox_create_obj(self, *mocks):
self._test_inbox_create_obj('/inbox', *mocks)
def _test_inbox_create_obj(self, path, mock_head, mock_get, mock_post):
swentel = self.make_user('https://mas.to/users/swentel', cls=ActivityPub)
Follower.get_or_create(to=swentel, from_=self.user)
bar = self.make_user('fake:bar', cls=Fake, obj_id='fake:bar')
Follower.get_or_create(to=self.make_user('https://other.actor',
cls=ActivityPub),
from_=bar)
baz = self.make_user('fake:baz', cls=Fake, obj_id='fake:baz')
Follower.get_or_create(to=swentel, from_=baz)
baj = self.make_user('fake:baj', cls=Fake, obj_id='fake:baj')
Follower.get_or_create(to=swentel, from_=baj, status='inactive')
mock_head.return_value = requests_response(url='http://target')
mock_get.return_value = self.as2_resp(ACTOR) # source actor
mock_post.return_value = requests_response()
got = self.post(path, json=NOTE)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
expected_obj = {
**as2.to_as1(NOTE_OBJECT),
'author': {'id': 'https://masto.foo/@author'},
}
self.assert_object(NOTE_OBJECT['id'],
source_protocol='activitypub',
our_as1=expected_obj,
type='note',
feed=[self.user.key, baz.key])
expected_create = as2.to_as1(common.redirect_unwrap(NOTE))
expected_create.update({
'actor': as2.to_as1(ACTOR),
'object': expected_obj,
})
self.assert_object('http://mas.to/note/as2',
source_protocol='activitypub',
our_as1=expected_create,
users=[ndb.Key(ActivityPub, 'https://masto.foo/@author')],
type='post',
object_ids=[NOTE_OBJECT['id']],
status='complete',
delivered=['shared:target'],
delivered_protocol='fake')
def test_repost_of_indieweb(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/orig')
mock_get.return_value = WEBMENTION_DISCOVERY
mock_post.return_value = requests_response() # webmention
orig_url = 'https://user.com/orig'
note = {
**NOTE_OBJECT,
'id': 'https://user.com/orig',
}
del note['url']
Object(id=orig_url, mf2=microformats2.object_to_json(as2.to_as1(note)),
source_protocol='web').put()
repost = copy.deepcopy(REPOST_FULL)
repost['object'] = f'http://localhost/r/{orig_url}'
got = self.post('/user.com/inbox', json=repost)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
convert_id = REPOST['id'].replace('://', ':/')
self.assert_req(
mock_post,
'https://user.com/webmention',
headers={'Accept': '*/*'},
allow_redirects=False,
data={
'source': f'https://ap.brid.gy/convert/web/{convert_id}',
'target': orig_url,
},
)
self.assert_object(REPOST_FULL['id'],
source_protocol='activitypub',
status='complete',
as2={
**REPOST,
'actor': ACTOR,
'object': orig_url,
},
users=[self.swentel_key],
delivered=['https://user.com/orig'],
type='share',
object_ids=['https://user.com/orig'])
def test_shared_inbox_repost_of_fediverse(self, mock_head, mock_get, mock_post):
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=self.user)
baz = self.make_user('fake:baz', cls=Fake, obj_id='fake:baz')
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']), from_=baz)
baj = self.make_user('fake:baj', cls=Fake, obj_id='fake:baj')
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=baj, status='inactive')
mock_head.return_value = requests_response(url='http://target')
mock_get.side_effect = [
self.as2_resp(ACTOR), # source actor
self.as2_resp(NOTE_OBJECT), # object of repost
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
HTML, # no webmention endpoint
]
got = self.post('/ap/sharedInbox', json=REPOST)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
mock_post.assert_not_called() # no webmention
self.assert_object(REPOST['id'],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1({**REPOST, 'actor': ACTOR}),
users=[self.swentel_key],
feed=[self.user.key, baz.key],
delivered=['shared:target'],
delivered_protocol='fake',
type='share',
object_ids=[REPOST['object']])
def test_inbox_no_user(self, mock_head, mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
HTML,
]
got = self.post('/ap/sharedInbox', json={
**LIKE,
'object': 'http://nope.com/post',
})
self.assertEqual(204, got.status_code)
self.assert_object('http://mas.to/like#ok',
# no nope.com Web user key since it didn't exist
source_protocol='activitypub',
status='ignored',
our_as1=as2.to_as1({
**LIKE_WITH_ACTOR,
'object': 'http://nope.com/post',
}),
type='like',
notify=[self.user.key],
users=[self.masto_actor_key],
object_ids=['http://nope.com/post'])
def test_inbox_not_public(self, mock_head, mock_get, mock_post):
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=self.user)
mock_head.return_value = requests_response(url='http://target')
mock_get.return_value = self.as2_resp(ACTOR) # source actor
not_public = copy.deepcopy(NOTE)
del not_public['object']['to']
got = self.post('/user.com/inbox', json=not_public)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
self.assertIsNone(Object.get_by_id(not_public['id']))
self.assertIsNone(Object.get_by_id(not_public['object']['id']))
def test_inbox_like(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/post')
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(200, got.status_code)
self.assertIn(self.as2_req('https://mas.to/actor'), mock_get.mock_calls)
self.assertIn(self.req('https://user.com/post'), mock_get.mock_calls)
args, kwargs = mock_post.call_args
self.assertEqual(('https://user.com/webmention',), args)
self.assertEqual({
'source': 'https://ap.brid.gy/convert/web/http:/mas.to/like%23ok',
'target': 'https://user.com/post',
}, kwargs['data'])
self.assert_object('http://mas.to/like#ok',
notify=[self.user.key],
users=[self.masto_actor_key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(LIKE_WITH_ACTOR),
delivered=['https://user.com/post'],
type='like',
object_ids=[LIKE['object']])
def test_inbox_like_indirect_user_creates_User(self, mock_get, *_):
self.user.direct = False
self.user.put()
mock_get.return_value = self.as2_resp(LIKE_ACTOR)
self.test_inbox_like()
self.assert_user(ActivityPub, 'https://mas.to/actor',
obj_as2=LIKE_ACTOR, direct=True)
def test_inbox_follow_accept_with_id(self, *mocks):
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 200, *mocks)
follow = {
**FOLLOW_WITH_ACTOR,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
}
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(follow),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_with_object(self, *mocks):
follow = {
**FOLLOW,
'object': {
'id': FOLLOW['object'],
'url': FOLLOW['object'],
},
}
self._test_inbox_follow_accept(follow, ACCEPT, 200, *mocks)
follow.update({
'actor': ACTOR,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
})
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(follow),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_shared_inbox(self, *mocks):
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 200, *mocks,
inbox_path='/ap/sharedInbox')
url = 'https://mas.to/users/swentel#followed-https://user.com/'
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1({**FOLLOW_WITH_ACTOR, 'url': url}),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_webmention_fails(self, mock_head, mock_get,
mock_post):
mock_post.side_effect = [
requests_response(), # AP Accept
requests.ConnectionError(), # webmention
]
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 502,
mock_head, mock_get, mock_post)
url = 'https://mas.to/users/swentel#followed-https://user.com/'
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='failed',
our_as1=as2.to_as1({**FOLLOW_WITH_ACTOR, 'url': url}),
delivered=[],
failed=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def _test_inbox_follow_accept(self, follow_as2, accept_as2, expected_status,
mock_head, mock_get, mock_post,
inbox_path='/user.com/inbox'):
        # this should make us mark the follower's ActivityPub user as direct=True
self.user.direct = False
self.user.put()
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
if not mock_post.return_value and not mock_post.side_effect:
mock_post.return_value = requests_response()
got = self.post(inbox_path, json=follow_as2)
self.assertEqual(expected_status, got.status_code)
mock_get.assert_has_calls((
self.as2_req(FOLLOW['actor']),
))
# check AP Accept
self.assertEqual(2, len(mock_post.call_args_list))
args, kwargs = mock_post.call_args_list[0]
self.assertEqual(('http://mas.to/inbox',), args)
self.assertEqual(accept_as2, json_loads(kwargs['data']))
# check webmention
args, kwargs = mock_post.call_args_list[1]
self.assertEqual(('https://user.com/webmention',), args)
self.assertEqual({
'source': 'https://ap.brid.gy/convert/web/https:/mas.to/6d1a',
'target': 'https://user.com/',
}, kwargs['data'])
# check that we stored Follower and ActivityPub user for the follower
self.assert_entities_equal(
Follower(to=self.user.key,
from_=ActivityPub(id=ACTOR['id']).key,
status='active',
follow=Object(id=FOLLOW['id']).key),
Follower.query().fetch(),
ignore=['created', 'updated'])
self.assert_user(ActivityPub, 'https://mas.to/users/swentel',
obj_as2=ACTOR, direct=True)
self.assert_user(Web, 'user.com', direct=False,
has_hcard=True, has_redirects=True)
def test_inbox_follow_use_instead_strip_www(self, mock_head, mock_get, mock_post):
self.make_user('www.user.com', use_instead=self.user.key)
mock_head.return_value = requests_response(url='https://www.user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(ACTOR),
# target post webmention discovery
requests_response('<html></html>'),
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
follower = Follower.query().get()
self.assert_entities_equal(
Follower(to=self.user.key,
from_=ActivityPub(id=ACTOR['id']).key,
status='active',
follow=Object(id=FOLLOW['id']).key),
follower,
ignore=['created', 'updated'])
# double check that Follower doesn't have www
self.assertEqual('user.com', follower.to.id())
# double check that follow Object doesn't have www
self.assertEqual('active', follower.status)
self.assertEqual('https://mas.to/users/swentel#followed-https://user.com/',
follower.follow.get().as2['url'])
def test_inbox_undo_follow(self, mock_head, mock_get, mock_post):
follower = Follower(to=self.user.key,
from_=ActivityPub.get_or_create(ACTOR['id']).key,
status='active')
follower.put()
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
# check that the Follower is now inactive
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_follow_inactive(self, mock_head, mock_get, mock_post):
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(FOLLOW_WITH_ACTOR['actor']),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
# check that the Follower is now active
self.assertEqual('active', follower.key.get().status)
def test_inbox_undo_follow_doesnt_exist(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
def test_inbox_undo_follow_inactive(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_undo_follow_composite_object(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
undo_follow = copy.deepcopy(UNDO_FOLLOW_WRAPPED)
undo_follow['object']['object'] = {'id': undo_follow['object']['object']}
got = self.post('/user.com/inbox', json=undo_follow)
self.assertEqual(200, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_unsupported_type(self, *_):
got = self.post('/user.com/inbox', json={
'@context': ['https://www.w3.org/ns/activitystreams'],
'id': 'https://xoxo.zone/users/aaronpk#follows/40',
'type': 'Block',
'actor': 'https://xoxo.zone/users/aaronpk',
'object': 'http://snarfed.org/',
})
self.assertEqual(501, got.status_code)
def test_inbox_bad_object_url(self, mock_head, mock_get, mock_post):
# https://console.cloud.google.com/errors/detail/CMKn7tqbq-GIRA;time=P30D?project=bridgy-federated
mock_get.return_value = self.as2_resp(ACTOR) # source actor
id = 'https://mas.to/users/tmichellemoore#likes/56486252'
bad_url = 'http://localhost/r/Testing \u2013 Brid.gy \u2013 Post to Mastodon 3'
bad = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': id,
'type': 'Like',
'actor': ACTOR['id'],
'object': bad_url,
}
got = self.post('/user.com/inbox', json=bad)
# bad object, should ignore activity
self.assertEqual(204, got.status_code)
mock_post.assert_not_called()
self.assert_object(id,
our_as1={
**as2.to_as1(bad),
'actor': as2.to_as1(ACTOR),
},
users=[self.swentel_key],
source_protocol='activitypub',
status='ignored',
)
self.assertIsNone(Object.get_by_id(bad_url))
@patch('activitypub.logger.info', side_effect=logging.info)
@patch('common.logger.info', side_effect=logging.info)
@patch('oauth_dropins.webutil.appengine_info.DEBUG', False)
def test_inbox_verify_http_signature(self, mock_common_log, mock_activitypub_log,
_, mock_get, ___):
# actor with a public key
self.key_id_obj.key.delete()
protocol.objects_cache.clear()
actor_as2 = {
**ACTOR,
'publicKey': {
'id': 'http://my/key/id#unused',
'owner': 'http://own/er',
'publicKeyPem': self.user.public_pem().decode(),
},
}
mock_get.return_value = self.as2_resp(actor_as2)
# valid signature
body = json_dumps(NOTE)
headers = self.sign('/ap/sharedInbox', json_dumps(NOTE))
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(204, resp.status_code, resp.get_data(as_text=True))
mock_get.assert_has_calls((
self.as2_req('http://my/key/id'),
))
mock_activitypub_log.assert_any_call('HTTP Signature verified!')
# valid signature, Object has no key
self.key_id_obj.as2 = ACTOR
self.key_id_obj.put()
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(401, resp.status_code, resp.get_data(as_text=True))
# valid signature, Object has our_as1 instead of as2
self.key_id_obj.clear()
self.key_id_obj.our_as1 = as2.to_as1(actor_as2)
self.key_id_obj.put()
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(204, resp.status_code, resp.get_data(as_text=True))
mock_activitypub_log.assert_any_call('HTTP Signature verified!')
# invalid signature, missing keyId
protocol.seen_ids.clear()
obj_key = ndb.Key(Object, NOTE['id'])
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', data=body, headers={
**headers,
'signature': headers['signature'].replace(
'keyId="http://my/key/id#unused",', ''),
})
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'HTTP Signature missing keyId'}, resp.json)
mock_common_log.assert_any_call('Returning 401: HTTP Signature missing keyId', exc_info=None)
# invalid signature, content changed
protocol.seen_ids.clear()
obj_key = ndb.Key(Object, NOTE['id'])
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', json={**NOTE, 'content': 'z'}, headers=headers)
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'Invalid Digest header, required for HTTP Signature'},
resp.json)
mock_common_log.assert_any_call('Returning 401: Invalid Digest header, required for HTTP Signature', exc_info=None)
# invalid signature, header changed
protocol.seen_ids.clear()
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', data=body, headers={**headers, 'Date': 'X'})
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'HTTP Signature verification failed'}, resp.json)
mock_common_log.assert_any_call('Returning 401: HTTP Signature verification failed', exc_info=None)
# no signature
protocol.seen_ids.clear()
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', json=NOTE)
self.assertEqual(401, resp.status_code, resp.get_data(as_text=True))
self.assertEqual({'error': 'No HTTP Signature'}, resp.json)
mock_common_log.assert_any_call('Returning 401: No HTTP Signature', exc_info=None)
def test_delete_actor(self, *mocks):
follower = Follower.get_or_create(
to=self.user, from_=ActivityPub.get_or_create(DELETE['actor']))
followee = Follower.get_or_create(
to=ActivityPub.get_or_create(DELETE['actor']),
from_=Fake.get_or_create('snarfed.org'))
# other unrelated follower
other = Follower.get_or_create(
to=self.user, from_=ActivityPub.get_or_create('https://mas.to/users/other'))
self.assertEqual(3, Follower.query().count())
got = self.post('/ap/sharedInbox', json=DELETE)
self.assertEqual(204, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
self.assertEqual('inactive', followee.key.get().status)
self.assertEqual('active', other.key.get().status)
def test_delete_actor_not_fetchable(self, _, mock_get, ___):
self.key_id_obj.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response(status=410)
got = self.post('/ap/sharedInbox', json={**DELETE, 'object': 'http://my/key/id'})
self.assertEqual(202, got.status_code)
def test_delete_actor_empty_deleted_object(self, _, mock_get, ___):
self.key_id_obj.as2 = None
self.key_id_obj.deleted = True
self.key_id_obj.put()
protocol.objects_cache.clear()
got = self.post('/ap/sharedInbox', json={**DELETE, 'object': 'http://my/key/id'})
self.assertEqual(202, got.status_code)
mock_get.assert_not_called()
def test_delete_note(self, _, mock_get, ___):
obj = Object(id='http://an/obj')
obj.put()
mock_get.side_effect = [
self.as2_resp(ACTOR),
]
delete = {
**DELETE,
'object': 'http://an/obj',
}
resp = self.post('/ap/sharedInbox', json=delete)
self.assertEqual(204, resp.status_code)
self.assertTrue(obj.key.get().deleted)
self.assert_object(delete['id'],
our_as1={
**as2.to_as1(delete),
'actor': as2.to_as1(ACTOR),
},
type='delete',
source_protocol='activitypub',
status='ignored',
users=[ActivityPub(id='https://mas.to/users/swentel').key])
obj.populate(deleted=True, as2=None)
self.assert_entities_equal(obj,
protocol.objects_cache['http://an/obj'],
ignore=['expire', 'created', 'updated'])
def test_update_note(self, *mocks):
Object(id='https://a/note', as2={}).put()
self._test_update(*mocks)
def test_update_unknown(self, *mocks):
self._test_update(*mocks)
def _test_update(self, _, mock_get, ___):
mock_get.side_effect = [
self.as2_resp(ACTOR),
]
resp = self.post('/ap/sharedInbox', json=UPDATE_NOTE)
self.assertEqual(204, resp.status_code)
note_as1 = as2.to_as1({
**UPDATE_NOTE['object'],
'author': {'id': 'https://mas.to/users/swentel'},
})
self.assert_object('https://a/note',
type='note',
our_as1=note_as1,
source_protocol='activitypub')
update_as1 = {
**as2.to_as1(UPDATE_NOTE),
'object': note_as1,
'actor': as2.to_as1(ACTOR),
}
self.assert_object(UPDATE_NOTE['id'],
source_protocol='activitypub',
type='update',
status='ignored',
our_as1=update_as1,
users=[self.swentel_key])
self.assert_entities_equal(Object.get_by_id('https://a/note'),
protocol.objects_cache['https://a/note'])
def test_inbox_webmention_discovery_connection_fails(self, mock_head,
mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
ReadTimeoutError(None, None, None),
]
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(502, got.status_code)
def test_inbox_no_webmention_endpoint(self, mock_head, mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
HTML,
]
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(204, got.status_code)
self.assert_object('http://mas.to/like#ok',
notify=[self.user.key],
users=[self.masto_actor_key],
source_protocol='activitypub',
status='ignored',
our_as1=as2.to_as1(LIKE_WITH_ACTOR),
type='like',
object_ids=[LIKE['object']])
def test_inbox_id_already_seen(self, *mocks):
obj_key = Object(id=FOLLOW_WRAPPED['id'], as2={}).put()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
self.assertEqual(0, Follower.query().count())
        # second time should use the in-memory cache
obj_key.delete()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
self.assertEqual(0, Follower.query().count())
def test_followers_collection_unknown_user(self, *_):
resp = self.client.get('/nope.com/followers')
self.assertEqual(404, resp.status_code)
def test_followers_collection_empty(self, *_):
resp = self.client.get('/user.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/followers',
'type': 'Collection',
'summary': "user.com's followers",
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'items': [],
},
}, resp.json)
def store_followers(self):
follow = Object(id=FOLLOW_WITH_ACTOR['id'], as2=FOLLOW_WITH_ACTOR).put()
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://bar', cls=ActivityPub, obj_as2=ACTOR),
follow=follow)
Follower.get_or_create(
to=self.make_user('https://other.actor', cls=ActivityPub),
from_=self.user)
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://baz', cls=ActivityPub, obj_as2=ACTOR),
follow=follow)
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://baj', cls=Fake),
status='inactive')
def test_followers_collection_fake(self, *_):
self.make_user('foo.com', cls=Fake)
resp = self.client.get('/ap/fake/foo.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/ap/fake/foo.com/followers',
'type': 'Collection',
'summary': "foo.com's followers",
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/ap/fake/foo.com/followers',
'items': [],
},
}, resp.json)
def test_followers_collection(self, *_):
self.store_followers()
resp = self.client.get('/user.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/followers',
'type': 'Collection',
'summary': "user.com's followers",
'totalItems': 2,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'items': [ACTOR, ACTOR],
},
}, resp.json)
@patch('models.PAGE_SIZE', 1)
def test_followers_collection_page(self, *_):
self.store_followers()
before = (datetime.utcnow() + timedelta(seconds=1)).isoformat()
next = Follower.query(Follower.from_ == ActivityPub(id='http://baz').key,
Follower.to == self.user.key,
).get().updated.isoformat()
resp = self.client.get(f'/user.com/followers?before={before}')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': f'http://localhost/user.com/followers?before={before}',
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'next': f'http://localhost/user.com/followers?before={next}',
'prev': f'http://localhost/user.com/followers?after={before}',
'items': [ACTOR],
}, resp.json)
def test_following_collection_unknown_user(self, *_):
resp = self.client.get('/nope.com/following')
self.assertEqual(404, resp.status_code)
def test_following_collection_empty(self, *_):
resp = self.client.get('/user.com/following')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/following',
'summary': "user.com's following",
'type': 'Collection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'items': [],
},
}, resp.json)
def store_following(self):
follow = Object(id=FOLLOW_WITH_ACTOR['id'], as2=FOLLOW_WITH_ACTOR).put()
Follower.get_or_create(
to=self.make_user('http://bar', cls=ActivityPub, obj_as2=ACTOR),
from_=self.user,
follow=follow)
Follower.get_or_create(
to=self.user,
from_=self.make_user('https://other.actor', cls=ActivityPub))
Follower.get_or_create(
to=self.make_user('http://baz', cls=ActivityPub, obj_as2=ACTOR),
from_=self.user, follow=follow)
Follower.get_or_create(
to=self.make_user('http://baj', cls=ActivityPub),
from_=self.user,
status='inactive')
def test_following_collection(self, *_):
self.store_following()
resp = self.client.get('/user.com/following')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/following',
'summary': "user.com's following",
'type': 'Collection',
'totalItems': 2,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'items': [ACTOR, ACTOR],
},
}, resp.json)
@patch('models.PAGE_SIZE', 1)
def test_following_collection_page(self, *_):
self.store_following()
after = datetime(1900, 1, 1).isoformat()
prev = Follower.query(Follower.to == ActivityPub(id='http://baz').key,
Follower.from_ == self.user.key,
).get().updated.isoformat()
resp = self.client.get(f'/user.com/following?after={after}')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': f'http://localhost/user.com/following?after={after}',
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'prev': f'http://localhost/user.com/following?after={prev}',
'next': f'http://localhost/user.com/following?before={after}',
'items': [ACTOR],
}, resp.json)
def test_outbox_fake(self, *_):
self.make_user('foo.com', cls=Fake)
resp = self.client.get(f'/ap/fake/foo.com/outbox')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/ap/fake/foo.com/outbox',
'summary': "foo.com's outbox",
'type': 'OrderedCollection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/ap/fake/foo.com/outbox',
'items': [],
},
}, resp.json)
def test_outbox_web(self, *_):
resp = self.client.get(f'/user.com/outbox')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/outbox',
'summary': "user.com's outbox",
'type': 'OrderedCollection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/outbox',
'items': [],
},
}, resp.json)
class ActivityPubUtilsTest(TestCase):
def setUp(self):
super().setUp()
g.user = self.make_user('user.com', has_hcard=True, obj_as2=ACTOR)
def test_put_validates_id(self, *_):
for bad in (
'',
'not a url',
'ftp://not.web/url',
'https:///no/domain',
'https://fed.brid.gy/foo',
'https://ap.brid.gy/foo',
'http://localhost/foo',
):
with self.assertRaises(AssertionError):
ActivityPub(id=bad).put()
def test_owns_id(self):
self.assertIsNone(ActivityPub.owns_id('http://foo'))
self.assertIsNone(ActivityPub.owns_id('https://bar/baz'))
self.assertFalse(ActivityPub.owns_id('at://did:plc:foo/bar/123'))
self.assertFalse(ActivityPub.owns_id('e45fab982'))
self.assertFalse(ActivityPub.owns_id('https://twitter.com/foo'))
self.assertFalse(ActivityPub.owns_id('https://fed.brid.gy/foo'))
def test_postprocess_as2_multiple_in_reply_tos(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'inReplyTo': 'foo',
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'inReplyTo': ['foo', 'bar'],
}))
def test_postprocess_as2_multiple_url(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'url': ['http://localhost/r/foo', 'http://localhost/r/bar'],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'url': ['foo', 'bar'],
}))
def test_postprocess_as2_multiple_image(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'attachment': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
'image': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'image': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
}))
def test_postprocess_as2_actor_attributedTo(self):
g.user = Fake(id='site')
self.assert_equals({
'actor': {
'id': 'baj',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
},
'attributedTo': [{
'id': 'bar',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
}, {
'id': 'baz',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
}],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'attributedTo': [{'id': 'bar'}, {'id': 'baz'}],
'actor': {'id': 'baj'},
}))
def test_postprocess_as2_note(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'type': 'Note',
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'type': 'Note',
}))
def test_postprocess_as2_hashtag(self):
"""https://github.com/snarfed/bridgy-fed/issues/45"""
self.assert_equals({
'tag': [
{'type': 'Hashtag', 'name': '#bar', 'href': 'bar'},
{'type': 'Hashtag', 'name': '#baz', 'href': 'http://localhost/hashtag/baz'},
{'type': 'Mention', 'href': 'foo'},
],
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}, postprocess_as2({
'tag': [
{'name': 'bar', 'href': 'bar'},
{'type': 'Tag', 'name': '#baz'},
# should leave alone
{'type': 'Mention', 'href': 'foo'},
],
}))
def test_postprocess_as2_url_attachments(self):
got = postprocess_as2(as2.from_as1({
'objectType': 'person',
'urls': [
{
'value': 'https://user.com/about-me',
'displayName': 'Mrs. \u2615 Foo',
}, {
'value': 'https://user.com/',
'displayName': 'should be ignored',
}, {
'value': 'http://one',
'displayName': 'one text',
}, {
'value': 'https://two',
'displayName': 'two title',
},
]
}))
self.assert_equals([{
'type': 'PropertyValue',
'name': 'Mrs. ☕ Foo',
'value': '<a rel="me" href="https://user.com/about-me"><span class="invisible">https://</span>user.com/about-me<span class="invisible"></span></a>',
}, {
'type': 'PropertyValue',
'name': 'Web site',
'value': '<a rel="me" href="https://user.com/"><span class="invisible">https://</span>user.com<span class="invisible">/</span></a>',
}, {
'type': 'PropertyValue',
'name': 'one text',
'value': '<a rel="me" href="http://one"><span class="invisible">http://</span>one<span class="invisible"></span></a>',
}, {
'type': 'PropertyValue',
'name': 'two title',
'value': '<a rel="me" href="https://two"><span class="invisible">https://</span>two<span class="invisible"></span></a>',
}], got['attachment'])
def test_postprocess_as2_preserves_preferredUsername(self):
        # preferredUsername stays the domain, user.com, despite the user's
        # custom username, since Mastodon queries Webfinger for it
# https://github.com/snarfed/bridgy-fed/issues/77#issuecomment-949955109
self.assertEqual('user.com', postprocess_as2({
'type': 'Person',
'url': 'https://user.com/about-me',
'preferredUsername': 'nick',
'attachment': [{
'type': 'PropertyValue',
'name': 'nick',
'value': '<a rel="me" href="https://user.com/about-me"><span class="invisible">https://</span>user.com/about-me<span class="invisible"></span></a>',
}],
})['preferredUsername'])
# TODO: make these generic and use Fake
@patch('requests.get')
def test_load_http(self, mock_get):
mock_get.return_value = AS2
id = 'http://the/id'
self.assertIsNone(Object.get_by_id(id))
# first time fetches over HTTP
got = ActivityPub.load(id)
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
mock_get.assert_has_calls([self.as2_req(id)])
# second time is in cache
got.key.delete()
mock_get.reset_mock()
got = ActivityPub.load(id)
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_datastore(self, mock_get):
id = 'http://the/id'
stored = Object(id=id, as2=AS2_OBJ)
stored.put()
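        # make sure the object isn't already in the in-memory cache, so the
        # first load() below has to come from the datastore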
protocol.objects_cache.clear()
# first time loads from datastore
got = ActivityPub.load(id)
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
# second time is in cache
stored.key.delete()
got = ActivityPub.load(id)
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_preserves_fragment(self, mock_get):
stored = Object(id='http://the/id#frag', as2=AS2_OBJ)
stored.put()
protocol.objects_cache.clear()
got = ActivityPub.load('http://the/id#frag')
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_datastore_no_as2(self, mock_get):
"""If the stored Object has no as2, we should fall back to HTTP."""
id = 'http://the/id'
stored = Object(id=id, as2={}, status='in progress')
stored.put()
protocol.objects_cache.clear()
mock_get.return_value = AS2
got = ActivityPub.load(id)
mock_get.assert_has_calls([self.as2_req(id)])
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
mock_get.assert_has_calls([self.as2_req(id)])
self.assert_object(id,
as2=AS2_OBJ,
as1={**AS2_OBJ, 'id': id},
source_protocol='activitypub',
# check that it reused our original Object
status='in progress')
@patch('requests.get')
def test_signed_get_redirects_manually_with_new_sig_headers(self, mock_get):
mock_get.side_effect = [
requests_response(status=302, redirected_url='http://second',
allow_redirects=False),
requests_response(status=200, allow_redirects=False),
]
activitypub.signed_get('https://first')
first = mock_get.call_args_list[0][1]
second = mock_get.call_args_list[1][1]
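        # the redirect crossed hosts, so the Host header, and therefore the
        # HTTP Signature, had to be recomputed for the second request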
self.assertNotEqual(first['headers'], second['headers'])
@patch('requests.get')
def test_signed_get_redirects_to_relative_url(self, mock_get):
mock_get.side_effect = [
# redirected URL is relative, we have to resolve it
requests_response(status=302, redirected_url='/second',
allow_redirects=False),
requests_response(status=200, allow_redirects=False),
]
activitypub.signed_get('https://first')
self.assertEqual(('https://first/second',), mock_get.call_args_list[1][0])
first = mock_get.call_args_list[0][1]
second = mock_get.call_args_list[1][1]
# headers are equal because host is the same
self.assertEqual(first['headers'], second['headers'])
self.assertEqual(
first['auth'].header_signer.sign(first['headers'], method='GET', path='/'),
second['auth'].header_signer.sign(second['headers'], method='GET', path='/'))
@patch('requests.post')
def test_signed_post_ignores_redirect(self, mock_post):
mock_post.side_effect = [
requests_response(status=302, redirected_url='http://second',
allow_redirects=False),
]
resp = activitypub.signed_post('https://first')
mock_post.assert_called_once()
self.assertEqual(302, resp.status_code)
@patch('requests.get')
def test_fetch_direct(self, mock_get):
mock_get.return_value = AS2
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
))
@patch('requests.get')
def test_fetch_direct_ld_content_type(self, mock_get):
mock_get.return_value = requests_response(AS2_OBJ, headers={
'Content-Type': 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"',
})
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
))
@patch('requests.get')
def test_fetch_via_html(self, mock_get):
mock_get.side_effect = [HTML_WITH_AS2, AS2]
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
self.as2_req('http://as2', headers=as2.CONNEG_HEADERS),
))
@patch('requests.get')
def test_fetch_only_html(self, mock_get):
mock_get.return_value = HTML
obj = Object(id='http://orig')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@patch('requests.get')
def test_fetch_not_acceptable(self, mock_get):
mock_get.return_value = NOT_ACCEPTABLE
obj = Object(id='http://orig')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@patch('requests.get')
def test_fetch_ssl_error(self, mock_get):
mock_get.side_effect = requests.exceptions.SSLError
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://orig'))
@patch('requests.get')
def test_fetch_no_content(self, mock_get):
mock_get.return_value = self.as2_resp('')
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://the/id'))
mock_get.assert_has_calls([self.as2_req('http://the/id')])
@patch('requests.get')
def test_fetch_not_json(self, mock_get):
mock_get.return_value = self.as2_resp('XYZ not JSON')
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://the/id'))
mock_get.assert_has_calls([self.as2_req('http://the/id')])
def test_fetch_non_url(self):
obj = Object(id='x y z')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@skip
def test_serve(self):
obj = Object(id='http://orig', as2=LIKE)
self.assertEqual((LIKE_WRAPPED, {'Content-Type': 'application/activity+json'}),
ActivityPub.serve(obj))
def test_postprocess_as2_idempotent(self):
g.user = self.make_user('foo.com')
for obj in (ACTOR, REPLY_OBJECT, REPLY_OBJECT_WRAPPED, REPLY,
NOTE_OBJECT, NOTE, MENTION_OBJECT, MENTION, LIKE,
LIKE_WRAPPED, REPOST, FOLLOW, FOLLOW_WRAPPED, ACCEPT,
UNDO_FOLLOW_WRAPPED, DELETE, UPDATE_NOTE,
# TODO: these currently fail
# LIKE_WITH_ACTOR, REPOST_FULL, FOLLOW_WITH_ACTOR,
# FOLLOW_WRAPPED_WITH_ACTOR, FOLLOW_WITH_OBJECT, UPDATE_PERSON,
):
with self.subTest(obj=obj):
obj = copy.deepcopy(obj)
self.assert_equals(postprocess_as2(obj),
postprocess_as2(postprocess_as2(obj)),
ignore=['to'])
def test_ap_address(self):
user = ActivityPub(obj=Object(id='a', as2={**ACTOR, 'preferredUsername': 'me'}))
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
user.obj.as2 = ACTOR
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
user = ActivityPub(id='https://mas.to/users/alice')
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
def test_ap_actor(self):
user = self.make_user('http://foo/actor', cls=ActivityPub)
self.assertEqual('http://foo/actor', user.ap_actor())
def test_web_url(self):
user = self.make_user('http://foo/actor', cls=ActivityPub)
self.assertEqual('http://foo/actor', user.web_url())
user.obj = Object(id='a', as2=copy.deepcopy(ACTOR)) # no url
self.assertEqual('http://foo/actor', user.web_url())
user.obj.as2['url'] = ['http://my/url']
self.assertEqual('http://my/url', user.web_url())
def test_readable_id(self):
user = self.make_user('http://foo', cls=ActivityPub)
self.assertIsNone(user.readable_id)
self.assertEqual('http://foo', user.readable_or_key_id())
user.obj = Object(id='a', as2=ACTOR)
self.assertEqual('@[email protected]', user.readable_id)
self.assertEqual('@[email protected]', user.readable_or_key_id())
@skip
def test_target_for_not_activitypub(self):
with self.assertRaises(AssertionError):
ActivityPub.target_for(Object(source_protocol='web'))
def test_target_for_actor(self):
self.assertEqual(ACTOR['inbox'], ActivityPub.target_for(
Object(source_protocol='ap', as2=ACTOR)))
actor = copy.deepcopy(ACTOR)
del actor['inbox']
self.assertIsNone(ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
actor['publicInbox'] = 'so-public'
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
# sharedInbox
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor), shared=True))
actor['endpoints'] = {
'sharedInbox': 'so-shared',
}
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
self.assertEqual('so-shared', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor), shared=True))
def test_target_for_object(self):
obj = Object(as2=NOTE_OBJECT, source_protocol='ap')
self.assertIsNone(ActivityPub.target_for(obj))
Object(id=ACTOR['id'], as2=ACTOR).put()
obj.as2 = {
**NOTE_OBJECT,
'author': ACTOR['id'],
}
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
del obj.as2['author']
obj.as2['actor'] = copy.deepcopy(ACTOR)
obj.as2['actor']['url'] = [obj.as2['actor'].pop('id')]
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
@patch('requests.get')
def test_target_for_object_fetch(self, mock_get):
mock_get.return_value = self.as2_resp(ACTOR)
obj = Object(as2={
**NOTE_OBJECT,
'author': 'http://the/author',
}, source_protocol='ap')
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
mock_get.assert_has_calls([self.as2_req('http://the/author')])
@patch('requests.get')
def test_target_for_author_is_object_id(self, mock_get):
obj = self.store_object(id='http://the/author', our_as1={
'author': 'http://the/author',
})
        # checks that we short-circuit instead of recursing infinitely
self.assertIsNone(ActivityPub.target_for(obj))
@patch('requests.post')
def test_send_blocklisted(self, mock_post):
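        # fed.brid.gy is one of our own domains, so send() should refuse it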
self.assertFalse(ActivityPub.send(Object(as2=NOTE),
'https://fed.brid.gy/ap/sharedInbox'))
mock_post.assert_not_called()
|
snarfed/bridgy-fed
|
tests/test_activitypub.py
|
test_activitypub.py
|
py
| 76,984 |
python
|
en
|
code
| 219 |
github-code
|
6
|
[
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "common.CONTENT_TYPE_HTML",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "common.CONTENT_TYPE_HTML",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "granary.as2.CONTENT_TYPE",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "testutil.TestCase",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "google.cloud.ndb.Key",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 279,
"usage_type": "argument"
},
{
"api_name": "google.cloud.ndb",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "google.cloud.ndb.Key",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 280,
"usage_type": "argument"
},
{
"api_name": "google.cloud.ndb",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "granary.as2.CONTENT_TYPE",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "httpsig.HeaderSigner",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.util.json_dumps",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "testutil.Fake",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "granary.as2.CONTENT_TYPE",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "granary.as2.CONTENT_TYPE",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions.ReadTimeoutError",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 401,
"usage_type": "argument"
},
{
"api_name": "copy.deepcopy",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 539,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 541,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 544,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 554,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "common.redirect_unwrap",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "google.cloud.ndb.Key",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 571,
"usage_type": "argument"
},
{
"api_name": "google.cloud.ndb",
"line_number": 571,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "granary.microformats2.object_to_json",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "granary.microformats2",
"line_number": 589,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 589,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 623,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 623,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 625,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 627,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 628,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 628,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 649,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 678,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 688,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 688,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "models.Object.get_by_id",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 700,
"usage_type": "name"
},
{
"api_name": "models.Object.get_by_id",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 701,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 732,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 744,
"usage_type": "argument"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 759,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 759,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 783,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 798,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 798,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 806,
"usage_type": "call"
},
{
"api_name": "requests.ConnectionError",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 818,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 818,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.util.json_loads",
"line_number": 851,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 863,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 864,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "models.Follower.query",
"line_number": 867,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 867,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 870,
"usage_type": "argument"
},
{
"api_name": "web.Web",
"line_number": 872,
"usage_type": "argument"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 878,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "models.Follower.query",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 890,
"usage_type": "name"
},
{
"api_name": "models.Follower",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 893,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 895,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 908,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 909,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 909,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 917,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 926,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 926,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 927,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 927,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 930,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 936,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 945,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 950,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 956,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 961,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 963,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 963,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 964,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 964,
"usage_type": "name"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 972,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 977,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 979,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 979,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 980,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 980,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 983,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1020,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1020,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1021,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1021,
"usage_type": "name"
},
{
"api_name": "models.Object.get_by_id",
"line_number": 1027,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1027,
"usage_type": "name"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1036,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1036,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.util.json_dumps",
"line_number": 1048,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.util.json_dumps",
"line_number": 1049,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1065,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1065,
"usage_type": "name"
},
{
"api_name": "protocol.seen_ids.clear",
"line_number": 1072,
"usage_type": "call"
},
{
"api_name": "protocol.seen_ids",
"line_number": 1072,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.ndb.Key",
"line_number": 1073,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1073,
"usage_type": "argument"
},
{
"api_name": "google.cloud.ndb",
"line_number": 1073,
"usage_type": "name"
},
{
"api_name": "protocol.seen_ids.clear",
"line_number": 1086,
"usage_type": "call"
},
{
"api_name": "protocol.seen_ids",
"line_number": 1086,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.ndb.Key",
"line_number": 1087,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1087,
"usage_type": "argument"
},
{
"api_name": "google.cloud.ndb",
"line_number": 1087,
"usage_type": "name"
},
{
"api_name": "protocol.seen_ids.clear",
"line_number": 1097,
"usage_type": "call"
},
{
"api_name": "protocol.seen_ids",
"line_number": 1097,
"usage_type": "attribute"
},
{
"api_name": "protocol.seen_ids.clear",
"line_number": 1106,
"usage_type": "call"
},
{
"api_name": "protocol.seen_ids",
"line_number": 1106,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1029,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 1029,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1030,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 1030,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1031,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1114,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1114,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 1115,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1115,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1116,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1116,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 1117,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1117,
"usage_type": "name"
},
{
"api_name": "testutil.Fake.get_or_create",
"line_number": 1118,
"usage_type": "call"
},
{
"api_name": "testutil.Fake",
"line_number": 1118,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1120,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1120,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.get_or_create",
"line_number": 1121,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1121,
"usage_type": "name"
},
{
"api_name": "models.Follower.query",
"line_number": 1122,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1122,
"usage_type": "name"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1132,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1132,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1134,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1142,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1142,
"usage_type": "attribute"
},
{
"api_name": "models.Object",
"line_number": 1149,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1165,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1166,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1166,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1171,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1175,
"usage_type": "attribute"
},
{
"api_name": "models.Object",
"line_number": 1179,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1193,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1193,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1203,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1203,
"usage_type": "name"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1205,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1205,
"usage_type": "name"
},
{
"api_name": "models.Object.get_by_id",
"line_number": 1214,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1214,
"usage_type": "name"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1215,
"usage_type": "attribute"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1223,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1224,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions.ReadTimeoutError",
"line_number": 1226,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1237,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1238,
"usage_type": "call"
},
{
"api_name": "granary.as2.to_as1",
"line_number": 1251,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1251,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1256,
"usage_type": "call"
},
{
"api_name": "models.Follower.query",
"line_number": 1260,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1260,
"usage_type": "name"
},
{
"api_name": "models.Follower.query",
"line_number": 1266,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1266,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1289,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1291,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1291,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1293,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1295,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1295,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1296,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1298,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1298,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1300,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1302,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1302,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 1304,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 1308,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 1346,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 1346,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 1346,
"usage_type": "call"
},
{
"api_name": "models.Follower.query",
"line_number": 1347,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1347,
"usage_type": "name"
},
{
"api_name": "models.Follower.from_",
"line_number": 1347,
"usage_type": "attribute"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1347,
"usage_type": "call"
},
{
"api_name": "models.Follower.to",
"line_number": 1348,
"usage_type": "attribute"
},
{
"api_name": "models.Follower",
"line_number": 1348,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1343,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1384,
"usage_type": "call"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1386,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1386,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1387,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1390,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1390,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1392,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1393,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1393,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1394,
"usage_type": "name"
},
{
"api_name": "models.Follower.get_or_create",
"line_number": 1396,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1396,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1397,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 1422,
"usage_type": "call"
},
{
"api_name": "models.Follower.query",
"line_number": 1423,
"usage_type": "call"
},
{
"api_name": "models.Follower",
"line_number": 1423,
"usage_type": "name"
},
{
"api_name": "models.Follower.to",
"line_number": 1423,
"usage_type": "attribute"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1423,
"usage_type": "call"
},
{
"api_name": "models.Follower.from_",
"line_number": 1424,
"usage_type": "attribute"
},
{
"api_name": "models.Follower",
"line_number": 1424,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1419,
"usage_type": "call"
},
{
"api_name": "testutil.Fake",
"line_number": 1440,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "testutil.TestCase",
"line_number": 1473,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 1476,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 1476,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1489,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1492,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1492,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1493,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1493,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1494,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1494,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1495,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1495,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1497,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1497,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.owns_id",
"line_number": 1498,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1498,
"usage_type": "name"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 1504,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1504,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1505,
"usage_type": "call"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 1514,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1514,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1515,
"usage_type": "call"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 1525,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1525,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1526,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 1532,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 1532,
"usage_type": "name"
},
{
"api_name": "testutil.Fake",
"line_number": 1532,
"usage_type": "call"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 1554,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1554,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1555,
"usage_type": "call"
},
{
"api_name": "granary.as2.PUBLIC_AUDIENCE",
"line_number": 1564,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1564,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1565,
"usage_type": "call"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1579,
"usage_type": "call"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1589,
"usage_type": "call"
},
{
"api_name": "granary.as2.from_as1",
"line_number": 1589,
"usage_type": "call"
},
{
"api_name": "granary.as2",
"line_number": 1589,
"usage_type": "name"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1631,
"usage_type": "call"
},
{
"api_name": "models.Object.get_by_id",
"line_number": 1648,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1648,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1651,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1651,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1660,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1660,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1643,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1668,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1670,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1670,
"usage_type": "attribute"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1673,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1673,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1679,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1679,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1665,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1685,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1687,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1687,
"usage_type": "attribute"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1689,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1689,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1683,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1697,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache.clear",
"line_number": 1699,
"usage_type": "call"
},
{
"api_name": "protocol.objects_cache",
"line_number": 1699,
"usage_type": "attribute"
},
{
"api_name": "activitypub.ActivityPub.load",
"line_number": 1702,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1702,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1693,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1719,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1721,
"usage_type": "call"
},
{
"api_name": "activitypub.signed_get",
"line_number": 1723,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1716,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1733,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1735,
"usage_type": "call"
},
{
"api_name": "activitypub.signed_get",
"line_number": 1737,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1729,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1753,
"usage_type": "call"
},
{
"api_name": "activitypub.signed_post",
"line_number": 1757,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1750,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1764,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1765,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1765,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1761,
"usage_type": "call"
},
{
"api_name": "oauth_dropins.webutil.testutil.requests_response",
"line_number": 1774,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1777,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1778,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1778,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1772,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1788,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1789,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1789,
"usage_type": "name"
},
{
"api_name": "granary.as2.CONNEG_HEADERS",
"line_number": 1794,
"usage_type": "attribute"
},
{
"api_name": "granary.as2",
"line_number": 1794,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1785,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1801,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1802,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1802,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1797,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1809,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1810,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1810,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1805,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 1815,
"usage_type": "attribute"
},
{
"api_name": "werkzeug.exceptions.BadGateway",
"line_number": 1816,
"usage_type": "argument"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1817,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1817,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1817,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1813,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.BadGateway",
"line_number": 1823,
"usage_type": "argument"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1824,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1824,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1824,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1819,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.BadGateway",
"line_number": 1832,
"usage_type": "argument"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1833,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1833,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1833,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1828,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1838,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.fetch",
"line_number": 1839,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1839,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1844,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.serve",
"line_number": 1846,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1846,
"usage_type": "name"
},
{
"api_name": "unittest.skip",
"line_number": 1842,
"usage_type": "name"
},
{
"api_name": "flask.g.user",
"line_number": 1849,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 1849,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 1860,
"usage_type": "call"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1861,
"usage_type": "call"
},
{
"api_name": "activitypub.postprocess_as2",
"line_number": 1862,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1866,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1866,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1874,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1879,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1883,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1886,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 1886,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1893,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1897,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1904,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1904,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1904,
"usage_type": "call"
},
{
"api_name": "unittest.skip",
"line_number": 1901,
"usage_type": "name"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1907,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1907,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1908,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 1910,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1912,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1912,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1913,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1916,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1916,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1917,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1920,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1920,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1921,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1925,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1925,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1926,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1927,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1927,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1928,
"usage_type": "call"
},
{
"api_name": "models.Object",
"line_number": 1931,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1932,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1932,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1934,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1939,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1939,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 1942,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1944,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1944,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1950,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1954,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1954,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1946,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.target_for",
"line_number": 1963,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1963,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1957,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub.send",
"line_number": 1967,
"usage_type": "call"
},
{
"api_name": "activitypub.ActivityPub",
"line_number": 1967,
"usage_type": "name"
},
{
"api_name": "models.Object",
"line_number": 1967,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 1965,
"usage_type": "call"
}
] |
27679859460
|
# Things to show
# Name, Orbital Radius, Gravity, Mass, Distance, Planet Type, Goldilock, Discovery Date, Mass of hoststar
from flask import Flask, jsonify
from pandas import read_csv
app = Flask(__name__)
data = read_csv("csv/display.csv")
@app.get("/")
def index():
to_send = []
i = 1
while True:
res = get_data(i)
        if not res[0]:
break
to_send.append(res[1])
i += 1
return cors(jsonify(to_send))
@app.route("/home")
def get_home():
to_send = []
columns = data.columns[1:]
columns_to_share = ["name", "planet_type"]
i = 0
while True:
try:
planet_data = {}
response = data.iloc[i].to_json(orient='records')[1:-1].split(",")[1:]
for j, item in enumerate(response):
if columns[j] in columns_to_share:
                    planet_data.update({columns[j]: item.replace('"', '')})
planet_data.update({ "index": i })
to_send.append(planet_data)
        except IndexError:
break
i += 1
    return cors(jsonify(to_send))
@app.route("/get/<int:i>")
def get_data_end_point(i):
return cors(jsonify(get_data(i)))
def get_data(i):
try:
to_send = {}
columns = data.columns[1:]
response = data.iloc[i].to_json(orient='records')[1:-1].split(",")[1:]
for j, item in enumerate(response):
            to_send.update({columns[j]: item.replace('"', '')})
return [True, to_send]
    except IndexError:
return [False, {}]
def cors(res):
res.headers.add("Access-Control-Allow-Origin", "*")
return res
if __name__ == "__main__":
app.run()
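# Hypothetical client-side sketch (not part of the API): assumes the server
# above is running on Flask's default http://127.0.0.1:5000 and that the
# `requests` package is available. Left commented out because app.run()
# blocks while the server is serving.
#
#   import requests
#   planets = requests.get("http://127.0.0.1:5000/home").json()
#   ok, first = requests.get(f"http://127.0.0.1:5000/get/{planets[0]['index']}").json()
#   if ok:
#       print(first.get("name"), first.get("planet_type"))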
|
CometConnect/python
|
api.py
|
api.py
|
py
| 1,549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 45,
"usage_type": "call"
}
] |
71186923
|
import boto3
import uuid
import json
from jwcrypto import jwt, jwk
DDB_CLIENT = boto3.client('dynamodb')
ddb_table = "iowt-devices"
def create_new_device():
    device_id = str(uuid.uuid4())
    key = jwk.JWK(generate="oct", size=256)
    key_data = json.loads(key.export())['k']
    token = jwt.JWT(header={"alg": "A256KW", "enc": "A256CBC-HS512"},
                    claims={"device_id": device_id})
    token.make_encrypted_token(key)
    return device_id, key_data, token.serialize()
device_id, key, token = create_new_device()
db_item = dict()
db_item['id'] = {'S': device_id}
db_item['deviceLocation'] = {'S': "Not Set"}
db_item['deviceName'] = {'S': "Not Set"}
db_item['deviceKey'] = {'S': key}
db_item['deviceToken'] = {'S': token}
db_item['deviceStatus'] = {'S': "new"}
db_item['deviceOwner'] = {'S': "none"}
DDB_CLIENT.put_item(TableName=ddb_table,
Item=db_item)
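# Sanity-check sketch: the key material written to DynamoDB should decrypt
# the token just issued. Rebuilding the JWK from the exported 'k' value and
# round-tripping the claims is illustrative only, not part of provisioning.
verify_key = jwk.JWK(kty="oct", k=key)
decrypted = jwt.JWT(key=verify_key, jwt=token)
assert json.loads(decrypted.claims)["device_id"] == device_id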
|
wilsonc101/iowt
|
www/create_device.py
|
create_device.py
|
py
| 886 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jwcrypto.jwk.JWK",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "jwcrypto.jwk",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jwcrypto.jwt.JWT",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jwcrypto.jwt",
"line_number": 16,
"usage_type": "name"
}
] |
42896164462
|
import jax
import numpy as np
import pytest
import hilbert_sort.jax as jax_backend
import hilbert_sort.numba as np_backend
@pytest.fixture(scope="module", autouse=True)
def config_pytest():
jax.config.update("jax_enable_x64", True)
@pytest.mark.parametrize("dim_x", [2, 3, 4])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_random_agree(dim_x, N, seed):
np.random.seed(seed)
x = np.random.randn(N, dim_x)
np_res = np_backend.hilbert_sort(x)
jax_res = jax_backend.hilbert_sort(x)
np.testing.assert_allclose(np_res, jax_res)
@pytest.mark.parametrize("nDests", [2, 3, 4, 5])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_transpose_bits(nDests, N, seed):
np.random.seed(seed)
x = np.random.randint(0, 150021651, (5,))
np_res = np_backend.transpose_bits(x, nDests)
jax_res = jax_backend.transpose_bits(x, nDests)
np.testing.assert_allclose(np_res, jax_res)
@pytest.mark.parametrize("nDests", [5, 7, 12])
@pytest.mark.parametrize("N", [150, 250])
@pytest.mark.parametrize("seed", [0, 42, 666])
def test_unpack_coords(nDests, N, seed):
np.random.seed(seed)
x = np.random.randint(0, 150021651, (nDests,))
max_int = 150021651
np_res = np_backend.unpack_coords(x)
jax_res = jax_backend.unpack_coords(x, max_int)
np.testing.assert_allclose(np_res, jax_res)
def test_gray_decode():
for n in range(5, 1_000):
np_res = np_backend.gray_decode(n)
jax_res = jax_backend.gray_decode(n)
np.testing.assert_allclose(np_res, jax_res)
|
AdrienCorenflos/parallel-Hilbert
|
tests/test_agree.py
|
test_agree.py
|
py
| 1,622 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "jax.config.update",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "jax.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "hilbert_sort.numba.hilbert_sort",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.numba",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "hilbert_sort.jax.hilbert_sort",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.jax",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "hilbert_sort.numba.transpose_bits",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.numba",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "hilbert_sort.jax.transpose_bits",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.jax",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "hilbert_sort.numba.unpack_coords",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.numba",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "hilbert_sort.jax.unpack_coords",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.jax",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "hilbert_sort.numba.gray_decode",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.numba",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "hilbert_sort.jax.gray_decode",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "hilbert_sort.jax",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "numpy.testing.assert_allclose",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.testing",
"line_number": 52,
"usage_type": "attribute"
}
] |
29099504157
|
import xml.etree.ElementTree as ET
from datetime import date
from pathlib import Path
def _convert_dict(temp_dict):
"""
Convert one dict to a new one
:param temp_dict: A temporary dict
:type temp_dict: dict
    :return: The same dict in a new format that fits the database
:rtype: dict
"""
data_dict = {}
for transferee in temp_dict:
counter = 0
for transferee_counter in temp_dict[transferee]["transferees"]:
temp_name = f"{transferee}_{counter}"
data_dict[temp_name] = {}
# for info in data_dict[transferee]["transferees"][transferee_counter]:
data_dict[temp_name]["SourceBarcode"] = temp_dict[transferee]["source"]
data_dict[temp_name]["SourceWell"] = temp_dict[transferee]["transferees"][transferee_counter]["source_well"]
data_dict[temp_name]["DestinationBarcode"] = temp_dict[transferee]["destination"]
data_dict[temp_name]["DestinationWell"] = temp_dict[transferee]["transferees"][transferee_counter]["destination_well"]
data_dict[temp_name]["Volume"] = temp_dict[transferee]["transferees"][transferee_counter]["transferee_volume"]
counter += 1
return data_dict
def _get_transferee_dict(file_list):
"""
    Translates an XML file list into two dicts
:param file_list: A list of files
:type file_list: list
:return:
        - transferee: what has been transferred
        - destination_plates: which plates the transfers have gone to
:rtype:
- dict
- dict
"""
transferee = {}
destination_plates = {}
# source_plate = {}
for i in file_list:
doc = ET.parse(i)
root = doc.getroot()
for dates in root.iter("transfer"):
date_running = dates.get("date")
date_str = f"plate_production_{date_running}"
transferee[date_str] = {}
# finds barcode for source and destination
for plates in root.iter("plate"):
source_destination = plates.get("type")
barcode = plates.get("barcode")
transferee[date_str][source_destination] = barcode
# if plates.get("type") == "source":
# source_plate[barcode] = {}
# source_plate[barcode]["SourceBarcode"] = barcode
# source_plate[barcode]["date"] = date.today()
if plates.get("type") == "destination":
destination_plates[barcode] = {}
destination_plates[barcode]["DestinationBarcode"] = barcode
destination_plates[barcode]["date"] = date.today()
# find source, destination and volume for each transferee
for wells_t in root.iter("printmap"):
wells_transferee = int(wells_t.get("total"))
transferee[date_str]["transferees"] = {}
for counter in range(wells_transferee):
temp_str = f"Transferee_{counter + 1}"
transferee[date_str]["transferees"][temp_str] = {}
wells_source = wells_t[counter].get("n")
wells_destination = wells_t[counter].get("dn")
transferee_volume = float(wells_t[counter].get("vt")) * 10e-6
transferee[date_str]["transferees"][temp_str]["source_well"] = wells_source
transferee[date_str]["transferees"][temp_str]["destination_well"] = wells_destination
transferee[date_str]["transferees"][temp_str]["transferee_volume"] = transferee_volume
# find source, destination and reason for each skipped well
for wells in root.iter("skippedwells"):
wells_skipped = int(wells.get("total"))
transferee[date_str]["Skipped"] = {}
# finds destination and source wells data
for z in range(wells_skipped):
temp_str = f"Skipped_{z + 1}"
transferee[date_str]["Skipped"][temp_str] = {}
wells_destination = wells[z].get("dn")
wells_source = wells[z].get("n")
reason = wells[z].get("reason")
transferee[date_str]["Skipped"][temp_str]["source_well"] = wells_source
transferee[date_str]["Skipped"][temp_str]["destination_well"] = wells_destination
transferee[date_str]["Skipped"][temp_str]["reason"] = reason
return transferee, destination_plates
def xml_controller(file_list):
"""
Controls the XML reader
:param file_list: List of files with XML data
:type file_list: list
:return:
        - transferee: what has been transferred
        - destination_plates: which plates the transfers have gone to
:rtype:
- dict
- dict
"""
transferee_dict, destination_plates = _get_transferee_dict(file_list)
data_dict = _convert_dict(transferee_dict)
return data_dict, destination_plates
def convert_echo_to_db(files):
echo_to_db = {}
transfer_counter = 0
    for file_path in files:
        file_path = Path(file_path)
        if file_path.name.startswith("Transfer"):
            doc = ET.parse(file_path)
root = doc.getroot()
# for counting plates and transferees
for plates in root.iter("plate"):
barcode = plates.get("barcode")
source_destination = plates.get("type")
if source_destination == "destination":
temp_d_barcode = barcode
if source_destination == "source":
temp_s_barcode = barcode
            echo_to_db.setdefault(temp_d_barcode, {"skipped_wells": {},
                                                   "transferred_wells": {}})
for wells in root.iter("printmap"):
wells_transferred = wells.get("total")
if int(wells_transferred) != 0:
for z in range(int(wells_transferred)):
destination_well = wells[z].get("dn")
source_well = wells[z].get("n")
vol = wells[z].get("vt")
echo_to_db[temp_d_barcode]["transferred_wells"][destination_well] = {
"mp_source_plate": temp_s_barcode,
"mp_source_well": source_well,
"vol": vol}
for wells in root.iter("skippedwells"):
wells_skipped = wells.get("total")
if int(wells_skipped) != 0:
transfer_counter += 1
for z in range(int(wells_skipped)):
destination_well = wells[z].get("dn")
source_well = wells[z].get("n")
reason = wells[z].get("reason")
reason = reason.split(":")[0]
echo_to_db[temp_d_barcode]["skipped_wells"][destination_well] = {
"mp_source_plate": temp_s_barcode,
"mp_source_well": source_well,
"reason": reason}
return echo_to_db
if __name__ == "__main__":
path = "2022-03-03"
from file_handler import get_file_list
file_list = get_file_list(path)
data, test = xml_controller(file_list)
print(data)
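    # Illustrative follow-up (hypothetical): convert_echo_to_db only looks at
    # files whose names start with "Transfer", so reusing the same file list
    # is safe even if it contains other XML files.
    echo_data = convert_echo_to_db(file_list)
    for barcode, wells in echo_data.items():
        print(barcode, len(wells["transferred_wells"]), "transferred,",
              len(wells["skipped_wells"]), "skipped")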
|
ZexiDilling/structure_search
|
xml_handler.py
|
xml_handler.py
|
py
| 7,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "file_handler.get_file_list",
"line_number": 192,
"usage_type": "call"
}
] |
39001711691
|
import csv
import MySQLdb
mydb= MySQLdb.connect(host='localhost',
user='root',
db='celebal')
cursor=mydb.cursor()
with open('dataset1.csv', 'r') as csvfile:
csv_data1 = csv.reader(csvfile, delimiter=',')
next(csv_data1)
cursor.execute("TRUNCATE TABLE data1")
for row in csv_data1:
cursor.execute("INSERT INTO data1(ID,Cities,Pincode,Office_ID) VALUES(%s,%s,%s,%s)",row)
mydb.commit()
with open('dataset2.csv','r') as csvfile2:
csv_data2 = csv.reader(csvfile2,delimiter=',')
next(csv_data2)
cursor.execute("TRUNCATE TABLE data2")
for row in csv_data2:
cursor.execute("INSERT INTO data2(ID,Office_ID,Population) VALUES(%s,%s,%s)",row)
mydb.commit()
cursor.execute("DROP TABLE new_records")
sql=("CREATE TABLE new_records AS SELECT d.ID,d.Office_ID,d.Cities,d.Pincode,dd.population from data1 d join data2 dd on d.Office_ID=dd.Office_ID;")
cursor.execute(sql)
cursor.close()
print("Done")
|
shauryaa/CelebalAssignment1
|
try.py
|
try.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "MySQLdb.connect",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 17,
"usage_type": "call"
}
] |
36637137136
|
import tkinter as tk
from tkinter import ttk
from tkinter import *
import numpy as np
from PIL import ImageTk, Image
from PIL.Image import Resampling
from hopfield_clouds import HopfieldClouds
# root.columnconfigure(0, weight=1)
# root.columnconfigure(1, weight=3)
class GUI:
def __init__(self):
self.picture_size = 420
self.network = HopfieldClouds(130 ** 2)
self.root = tk.Tk()
self.root.geometry('1280x500')
self.root.title('Hopfield Clouds')
self.next_button = ttk.Button(self.root, text='>', command=self.next_image)
self.next_button.grid(row=1, column=0, sticky=tk.E)
self.prev_button = ttk.Button(self.root, text='<', command=self.prev_image)
self.prev_button.grid(row=1, column=0, sticky=tk.W)
self.original_img = self.network.get_current_image()
self.original_img = self.original_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
original_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
original_frame.grid(row=0, columnspan=1, sticky='we')
self.original_image_label = Label(original_frame, image=self.img_tk)
self.original_image_label.grid(row=1, column=0)
self.cropped_img = Image.fromarray(np.uint8(np.zeros((self.picture_size, self.picture_size, 3))))
self.cropped_img = ImageTk.PhotoImage(self.cropped_img)
self.cropped_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
self.cropped_frame.grid(row=0, column=1, sticky='we')
self.cropped_image_label = Label(self.cropped_frame, image=self.cropped_img)
self.cropped_image_label.grid(row=1, column=1)
self.current_value = tk.DoubleVar()
# slider
self.slider = ttk.Scale(self.root, from_=1, to=99, orient='horizontal', command=self.slider_changed,
variable=self.current_value)
self.slider.set(50)
self.slider.bind('<ButtonRelease-1>', self.slider_up)
self.slider_label = Label(self.root, text='Percentage to crop:')
self.slider_label.grid(row=1, column=1, columnspan=1, sticky='we')
self.slider.grid(column=1, columnspan=1, row=2, sticky='we')
self.value_label = ttk.Label(self.root, text=self.get_current_value())
self.value_label.grid(row=3, column=1, columnspan=1, sticky='n')
self.reconstructed_img = Image.fromarray(np.uint8(np.zeros((self.picture_size, self.picture_size, 3))))
self.reconstructed_img = ImageTk.PhotoImage(self.reconstructed_img)
self.reconstructed_frame = Frame(self.root, width=self.picture_size, height=self.picture_size)
self.reconstructed_frame.grid(row=0, column=2, columnspan=1, sticky='n')
self.reconstructed_image_label = Label(self.reconstructed_frame, image=self.reconstructed_img)
self.reconstructed_image_label.grid(row=1, column=2, columnspan=1)
self.reconstruct_button = ttk.Button(self.root, text='Reconstruct', command=self.reconstruct)
self.reconstruct_button.grid(row=1, column=2, sticky='n')
self.slider_up(None)
self.root.mainloop()
def slider_changed(self, event):
self.value_label.configure(text=self.get_current_value())
def get_current_value(self):
return '{: .2f}'.format(self.current_value.get())
def next_image(self):
img = self.network.next_image()
self.original_img = img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
self.original_image_label.configure(image=self.img_tk)
self.slider_up(None)
def prev_image(self):
img = self.network.prev_image()
self.original_img = img.resize((self.picture_size,self.picture_size), Resampling.LANCZOS)
self.img_tk = ImageTk.PhotoImage(self.original_img)
self.original_image_label.configure(image=self.img_tk)
self.slider_up(None)
def reconstruct(self):
cropped, reconstructed = self.network.get_current_image_predictions(int(self.current_value.get()))
self.reconstructed_img = reconstructed
self.reconstructed_img = self.reconstructed_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.reconstructed_img = ImageTk.PhotoImage(self.reconstructed_img)
self.reconstructed_image_label.configure(image=self.reconstructed_img)
def slider_up(self, event):
cropped = self.network.get_current_cropped(int(self.current_value.get()))
self.cropped_img = cropped
self.cropped_img = self.cropped_img.resize((self.picture_size, self.picture_size), Resampling.LANCZOS)
self.cropped_img = ImageTk.PhotoImage(self.cropped_img)
self.cropped_image_label.configure(image=self.cropped_img)
gui = GUI()
|
behenate/hopfield-reconstruction
|
gui.py
|
gui.py
|
py
| 5,007 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "hopfield_clouds.HopfieldClouds",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "tkinter.E",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tkinter.W",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling.LANCZOS",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "tkinter.DoubleVar",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Scale",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Resampling.LANCZOS",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Resampling.LANCZOS",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Resampling.LANCZOS",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Resampling.LANCZOS",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.Resampling",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 107,
"usage_type": "name"
}
] |
15598819292
|
import sys
sys.path.append('..')
import torch
from torch import nn
from torch.nn import functional as F
from ssd import config as cfg
from basenet.vgg import vgg_feat
from basenet.resnet import resnet101_feat
from ssd.utils_ssd.priorbox import PriorBox
from ssd.utils_ssd.L2Norm import L2Norm
from ssd.utils_ssd.detect import Detect
extras_vgg = {'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]}
extras_res = {'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]}
l_vgg = [23, 512]
l_res = [11, 512]
mbox_vgg = {'300': [(512, 4), (1024, 6), (512, 6), (256, 6), (256, 4), (256, 4)]}
mbox_res = {'300': [(512, 4), (2048, 6), (512, 6), (256, 6), (256, 4), (256, 4)]}
# extend vgg: 5 "additional" feature parts
def add_extras(i, cfg=extras_vgg, vgg=True):
fc7 = [nn.MaxPool2d(3, 1, 1), nn.Conv2d(512, 1024, 3, 1, 6, 6), nn.ReLU(inplace=True),
nn.Conv2d(1024, 1024, 1), nn.ReLU(inplace=True)] if vgg else []
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return fc7, layers
# feature map to loc+conf
def multibox(num_classes=21, cfg=mbox_vgg):
loc_layers = []
conf_layers = []
for channel, n in cfg:
loc_layers += [nn.Conv2d(channel, n * 4, 3, 1, 1)]
conf_layers += [nn.Conv2d(channel, n * num_classes, 3, 1, 1)]
return loc_layers, conf_layers
# single shot multibox detector
class SSD(nn.Module):
def __init__(self, phase, base, extras, loc, conf, num_classes, l=l_vgg):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.priors = PriorBox(cfg.v2)()
self.size = 300
self.l = l[0]
self.bone = nn.ModuleList(base)
self.l2norm = L2Norm(l[1], 20)
self.extras = nn.ModuleList(extras)
self.loc, self.conf = nn.ModuleList(loc), nn.ModuleList(conf)
if phase == 'test':
self.detect = Detect(num_classes, cfg.top_k, cfg.conf_thresh, cfg.nms_thresh)
def forward(self, x):
source, loc, conf = list(), list(), list()
for k in range(self.l):
x = self.bone[k](x)
source.append(self.l2norm(x))
for k in range(self.l, len(self.bone)):
x = self.bone[k](x)
source.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
source.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(source, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if not self.priors.is_cuda and loc.is_cuda:
self.priors = self.priors.cuda()
if self.phase == 'test':
output = self.detect(
loc.view(loc.size(0), -1, 4),
F.softmax(conf.view(conf.size(0), -1, self.num_classes), dim=2),
self.priors
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def build_ssd(phase, size=300, num_classes=21, bone='vgg'):
    if phase != 'test' and phase != 'train':
        raise ValueError("Error: Phase not recognized")
    if size != 300:
        raise ValueError("Error: Only SSD300 is supported")
if bone == 'vgg':
base_ = vgg_feat()
fc7_, extras_ = add_extras(1024, extras_vgg['300'])
loc_, conf_ = multibox(num_classes, mbox_vgg['300'])
l = l_vgg
elif bone == 'res101':
base_ = resnet101_feat()
fc7_, extras_ = add_extras(2048, extras_res['300'], False)
loc_, conf_ = multibox(num_classes, mbox_res['300'])
l = l_res
else:
raise IOError("only vgg or res101")
return SSD(phase, base_ + fc7_, extras_, loc_, conf_, num_classes, l)
if __name__ == '__main__':
net = build_ssd('train', bone='vgg')
img = torch.randn((1, 3, 300, 300))
out = net(img)
print(out[1])
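    # Shape sketch: in 'train' phase the network returns (loc, conf, priors).
    # With the standard SSD300 prior configuration (cfg.v2) this should be
    # loc (1, 8732, 4), conf (1, 8732, 21), priors (8732, 4); 8732 is assumed
    # from the usual 38/19/10/5/3/1 feature-map grid.
    loc_out, conf_out, priors_out = out
    print(loc_out.shape, conf_out.shape, priors_out.shape)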
|
AceCoooool/detection-pytorch
|
ssd/ssd300.py
|
ssd300.py
|
py
| 4,567 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ssd.config",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "ssd.config",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "ssd.config",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "ssd.utils_ssd.priorbox.PriorBox",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ssd.config.v2",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "ssd.config",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "ssd.utils_ssd.L2Norm.L2Norm",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "ssd.utils_ssd.detect.Detect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "ssd.config.top_k",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "ssd.config",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "ssd.config.conf_thresh",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "ssd.config.nms_thresh",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "basenet.vgg.vgg_feat",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "basenet.resnet.resnet101_feat",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 127,
"usage_type": "call"
}
] |
1360530890
|
import Utils.Data as data
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set, \
get_test_or_val_set_id_from_train
from Utils.Data.Features.Feature import Feature
from Utils.Data.Features.Generated.EnsemblingFeature.MatrixEnsembling import ItemCBFMatrixEnsembling
from Utils.Data.Features.Generated.EnsemblingFeature.XGBEnsembling import XGBEnsembling
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle
import pathlib as pl
import numpy as np
import pandas as pd
import hashlib
from Utils.Data.Sparse.CSR.CreatorTweetMatrix import CreatorTweetMatrix
from Utils.Data.Sparse.CSR.HashtagMatrix import HashtagMatrix
from Utils.Data.Sparse.CSR.URM import URM
class HashtagSimilarityFoldEnsembling(GeneratedFeaturePickle):
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"hashtag_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
# self.csv_path = pl.Path(
# f"{Feature.ROOT_PATH}/{self.dataset_id}/similarity_ensembling/{self.feature_name}.csv.gz")
# self.number_of_folds = number_of_folds
# self.engager_features = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# f"tweet_feature_engagement_is_{label}"
# ]
# self.creator_features = [
# "mapped_feature_creator_id",
# "mapped_feature_tweet_id"
# ]
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
# # Load the hashtag similarity
# sim = HashtagMatrix().load_similarity().tocsr()
#
# # Check if the dataset id is train or test
# if not is_test_or_val_set(self.dataset_id):
# # Compute train and test dataset ids
# train_dataset_id = self.dataset_id
#
# # Load the dataset and shuffle it
# X_train = data.Data.get_dataset(features=self.engager_features,
# dataset_id=train_dataset_id).sample(frac=1)
#
# creator_X_train = data.Data.get_dataset(features=self.creator_features,
# dataset_id=train_dataset_id)
#
# # Create the ctm 'creator tweet matrix'
# ctm = CreatorTweetMatrix(creator_X_train).get_as_urm().astype(np.uint8)
#
# # Compute the folds
# X_train_folds = np.array_split(X_train, self.number_of_folds)
#
# # Declare list of scores (of each folds)
# # used for aggregating results
# scores = []
#
# # Train multiple models with 1-fold out strategy
# for i in range(self.number_of_folds):
# # Compute the train set
# X_train = pd.concat([X_train_folds[x].copy() for x in range(self.number_of_folds) if x is not i])
# X_train.columns = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# "engagement"
# ]
#
#
# # Compute the test set
# X_test = X_train_folds[i].copy()
#
# # Generate the dataset id for this fold
# fold_dataset_id = f"{self.feature_name}_{self.dataset_id}_fold_{i}"
#
# # Load the urm
# urm = URM(X_train).get_as_urm().astype(np.uint8)
# urm = urm + ctm
#
# # Create the sub-feature
# feature = ItemCBFMatrixEnsembling(self.feature_name, fold_dataset_id, urm, sim, X_train)
#
# # Retrieve the scores
# scores.append(pd.DataFrame(feature.load_or_create()))
# print(X_test.index)
# print(scores.index)
#
# # Compute the resulting dataframe and sort the results
# result = pd.concat(scores).sort_index()
#
# # Save it as a feature
# self.save_feature(result)
#
# else:
# test_dataset_id = self.dataset_id
# train_dataset_id = get_train_set_id_from_test_or_val_set(test_dataset_id)
#
# creator_X_train = data.Data.get_dataset(features=self.creator_features,
# dataset_id=train_dataset_id)
# creator_X_test = data.Data.get_dataset(features=self.creator_features,
# dataset_id=test_dataset_id)
# creator_X = pd.concat([creator_X_train, creator_X_test])
#
# # Create the ctm 'creator tweet matrix'
# ctm = CreatorTweetMatrix(creator_X).get_as_urm().astype(np.uint8)
#
# # Load the train dataset
# X_train = data.Data.get_dataset(features=self.engager_features, dataset_id=train_dataset_id)
# X_train.columns = [
# "mapped_feature_engager_id",
# "mapped_feature_tweet_id",
# "engagement"
# ]
# # Load the urm
# urm = URM(X_train).get_as_urm().astype(np.uint8)
# urm = urm + ctm
#
# # Load the test dataset
# X_test = data.Data.get_dataset(features=self.engager_features, dataset_id=test_dataset_id)
# X_test.columns = ["user", "item", "engagement"]
#
# # Create the sub-feature
# feature = ItemCBFMatrixEnsembling(self.feature_name, self.dataset_id, urm, sim, X_test.copy())
#
# # Retrieve the scores
# result = pd.DataFrame(feature.load_or_create(), index=X_test.index)
#
# # Save it as a feature
# self.save_feature(result)
class DomainSimilarityFoldEnsembling(GeneratedFeaturePickle):
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"domain_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
class LinkSimilarityFoldEnsembling(GeneratedFeaturePickle):
def __init__(self,
dataset_id: str,
label: str,
number_of_folds: int = 5
):
feature_name = f"link_similarity_fold_ensembling_{label}"
super().__init__(feature_name, dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/similarity_ensembling/{self.feature_name}.pck.gz")
def create_feature(self):
raise Exception("This feature is created externally. See gen_hashtag_similarity...py")
|
MaurizioFD/recsys-challenge-2020-twitter
|
Utils/Data/Features/Generated/EnsemblingFeature/SimilarityFoldEnsembling.py
|
SimilarityFoldEnsembling.py
|
py
| 7,398 |
python
|
en
|
code
| 39 |
github-code
|
6
|
[
{
"api_name": "Utils.Data.Features.Generated.GeneratedFeature.GeneratedFeaturePickle",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Utils.Data.Features.Feature.Feature.ROOT_PATH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "Utils.Data.Features.Feature.Feature",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Utils.Data.Features.Generated.GeneratedFeature.GeneratedFeaturePickle",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "Utils.Data.Features.Feature.Feature.ROOT_PATH",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "Utils.Data.Features.Feature.Feature",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "Utils.Data.Features.Generated.GeneratedFeature.GeneratedFeaturePickle",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "Utils.Data.Features.Feature.Feature.ROOT_PATH",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "Utils.Data.Features.Feature.Feature",
"line_number": 167,
"usage_type": "name"
}
] |
75136926587
|
import pytest
import tgalice
from dialog_manager import QuizDialogManager
@pytest.fixture
def default_dialog_manager():
return QuizDialogManager.from_yaml('texts/quiz.yaml')
def make_context(text='', prev_response=None, new_session=False):
if prev_response is not None:
user_object = prev_response.updated_user_object
else:
user_object = {}
if new_session:
metadata = {'new_session': True}
else:
metadata = {}
return tgalice.dialog_manager.Context(user_object=user_object, metadata=metadata, message_text=text)
def test_start(default_dialog_manager):
r0 = default_dialog_manager.respond(make_context(new_session=True))
assert 'Йоу!' in r0.text # substring in string
assert 'да' in r0.suggests # string in list of strings
assert 'нет' in r0.suggests # string in list of strings
def test_randomization(default_dialog_manager):
r0 = default_dialog_manager.respond(make_context(new_session=True))
r1 = default_dialog_manager.respond(make_context(text='да', prev_response=r0))
chosen_options = set()
for i in range(100):
r2 = default_dialog_manager.respond(
make_context(text='какая-то безумная хрень которая точно не матчится', prev_response=r1)
)
chosen_options.add(r2.updated_user_object['form']['sex'])
assert chosen_options == {'м', 'ж'}
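def test_declining_quiz(default_dialog_manager):
    # Hedged sketch: after declining the quiz with 'нет' the manager should
    # still answer with some text; the exact wording is not asserted because
    # it depends on texts/quiz.yaml.
    r0 = default_dialog_manager.respond(make_context(new_session=True))
    r1 = default_dialog_manager.respond(make_context(text='нет', prev_response=r0))
    assert isinstance(r1.text, str) and len(r1.text) > 0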
|
avidale/musiquiz
|
test_scenarios.py
|
test_scenarios.py
|
py
| 1,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dialog_manager.QuizDialogManager.from_yaml",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dialog_manager.QuizDialogManager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tgalice.dialog_manager.Context",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tgalice.dialog_manager",
"line_number": 21,
"usage_type": "attribute"
}
] |
160637604
|
import numpy as np
import pandas as pd
#Setting the recent season match
yrBefore = np.arange(1900,2023)
yrAfter = np.arange(1901,2024)
yrBefore_list = []
yrAfter_list = []
for s in yrBefore:
a = str(s)
yrBefore_list.append(a)
for j in yrAfter:
b = str(j)
yrAfter_list.append(b)
season_list = []
for f in range (len(yrBefore)):
season = yrBefore_list[f] + '/' + yrAfter_list[f]
season_list.append(season)
#Getting Table from online
df_bt = pd.read_html("https://www.soccerbase.com/teams/team.sd?team_id=2898&team2_id=376&teamTabs=h2h")
#Picking Table From Source
sdf= df_bt[2]
startingYear = sdf.columns[0]
if startingYear in season_list:
    x = startingYear
else:
    raise SystemExit('No past record of the teams')
y = x
r = x + '.1'
n = x + '.2'
m = x + '.7'
l = x + '.8'
p = x + '.9'
new_df = sdf[sdf[r].apply(lambda s: s[4]) != '/'].copy()
new_df.drop(y, axis = 1, inplace = True)
new_df.set_index(r,inplace= True)
new_df.drop([n, m,l,p], axis = 1, inplace = True)
new_df.columns = ['Home', 'Scores', 'Away', 'Result']
new_df.index.names = ['Date']
new_df['ScoresH'] = new_df['Scores'].apply(lambda x: x[0])
new_df['ScoresA'] = new_df['Scores'].apply(lambda x: x[4])
new_df['ScoresH'] = new_df['ScoresH'].apply(lambda x: int(x))
new_df['ScoresA'] = new_df['ScoresA'].apply(lambda x: int(x))
new_df['ResultN'] = new_df['ScoresH'] - new_df['ScoresA']
new_df.loc[new_df['ResultN'] > 0, 'Result'] = new_df['Home']
new_df.loc[new_df['ResultN'] < 0, 'Result'] = new_df['Away']
new_df.loc[new_df['ResultN'] == 0, 'Result'] = 'Draw'
new_df['Result']= new_df['Result'] + ' Wins'
Result = pd.get_dummies(new_df['Result'])
Home = pd.get_dummies(new_df['Home'])
Away = pd.get_dummies(new_df['Away'])
new_df.drop(['Home','Scores', 'Away'], axis = 1,inplace = True)
ddf= pd.concat([new_df,Result,Home,Away],axis = 1)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,confusion_matrix
for i in Result:
x = i
print(x.upper())
X_train, X_test, y_train, y_test = train_test_split(ddf.drop([x,'Result'],axis=1),
ddf[x], test_size=0.30,
random_state=101)
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
predictions = logmodel.predict(X_test)
print(classification_report(y_test,predictions))
print(confusion_matrix(y_test,predictions))
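# Illustrative follow-up (outside the loop): score the most recent fixture
# with the last-fitted model. `x` still holds the final outcome column and
# this mirrors the feature construction used above; example only.
latest = ddf.drop([x, 'Result'], axis=1).iloc[[-1]]
print('P(%s) for latest fixture: %.3f' % (x, logmodel.predict_proba(latest)[0][1]))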
|
Taofeek26/Taofeek26
|
btttt.py
|
btttt.py
|
py
| 2,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.arange",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 90,
"usage_type": "call"
}
] |
19809314779
|
import os
from PIL import Image
from typing import Dict, List
from preprocessing.image_metadata import ImageMetadata
class ImagesReader:
def __init__(self, base_path: str) -> None:
self.__basePath = base_path
def read_train_images(self) -> Dict[str, List[ImageMetadata]]:
images = {}
dataset_dir = os.path.join(self.__basePath, 'train')
for root, dirs, files in os.walk(dataset_dir, topdown=False):
if root not in [self.__basePath, dataset_dir]:
files = [img for img in files if img.endswith('.jpg') or img.endswith('.JPEG')]
class_id = self.__get_class_id__(root)
images[class_id] = []
for name in files:
image = self.__get_image_metadata__(os.path.join(root, name))
images[class_id].append(image)
return images
def read_test_images(self) -> List[ImageMetadata]:
images = []
dataset_dir = os.path.join(self.__basePath, 'test')
files = [img for img in os.listdir(dataset_dir) if img.endswith('.jpg') or img.endswith('.JPEG')]
for name in files:
image = self.__get_image_metadata__(os.path.join(dataset_dir, name))
images.append(image)
return images
@staticmethod
def __get_image_metadata__(image_path: str) -> ImageMetadata:
image = Image.open(image_path)
return ImageMetadata(image.filename, (image.width, image.height), image.layers, image.mode)
@staticmethod
def __get_class_id__(dir_path: str) -> str:
class_id = dir_path.split(os.sep)[-1].split('.')[0]
return class_id
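# Hypothetical usage sketch: assumes a Caltech-256 style layout with
# train/<class_dir>/ images and test/ images under the base path; 'data'
# below is a placeholder.
if __name__ == '__main__':
    reader = ImagesReader('data')
    train = reader.read_train_images()
    print(f'{len(train)} classes, {sum(len(v) for v in train.values())} train images')
    print(f'{len(reader.read_test_images())} test images')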
|
sachokFoX/caltech_256
|
code/preprocessing/images_reader.py
|
images_reader.py
|
py
| 1,666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "preprocessing.image_metadata.ImageMetadata",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "preprocessing.image_metadata.ImageMetadata",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "preprocessing.image_metadata.ImageMetadata",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "preprocessing.image_metadata.ImageMetadata",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.sep",
"line_number": 46,
"usage_type": "attribute"
}
] |
22002934531
|
"""
Interfaces for Deep Q-Network.
"""
import os
import random
import numpy as np
import tensorflow as tf
from collections import deque
from scipy.misc import imresize
from qnet import QNet
class DeepQLearner(object):
"""
Provides wrapper around TensorFlow for Deep Q-Network.
"""
def __init__(self,
actions,
weight_save_path,
weight_restore_path,
log_path,
weight_save_frequency,
update_frequency,
log_frequency,
batch_size,
learning_rate,
burn_in_duration,
exploration_duration,
exploration_end_rate,
replay_max_size,
discount_rate,
action_repeat,
state_frames,
frame_height,
frame_width,
dueling,
pooling,
training):
"""
        Initializes the TensorFlow graph.
        Args:
            actions: List of viable actions learner can make. (Must be PyGame constants.)
            weight_save_path: File path to store saved weights (saved every
                `weight_save_frequency` iterations when set).
            weight_restore_path: If set, weights are restored from this path on startup.
            log_path: File path where score ratios are appended during training.
            (The remaining arguments are training hyperparameters matching their names.)
"""
# Save allowed actions.
self.actions = actions
# Handle network save/restore.
self.weight_save_path = weight_save_path
self.weight_restore_path = weight_restore_path
self.log_path = log_path
# Save training parameters.
self.weight_save_frequency = weight_save_frequency
self.update_frequency = update_frequency
self.log_frequency = log_frequency
self.batch_size = batch_size
self.learning_rate = learning_rate
self.burn_in_duration = burn_in_duration
self.exploration_duration = exploration_duration
self.exploration_rate = 1.
self.exploration_end_rate = exploration_end_rate
self.replay_max_size = replay_max_size
self.discount_rate = discount_rate
self.action_repeat = action_repeat
self.state_frames = state_frames
self.frame_height = frame_height
self.frame_width = frame_width
self.dueling = dueling
self.pooling = pooling
self.training = training
# Initialize variables.
self.iteration = -1
self.actions_taken = 0
self.repeating_action_rewards = 0
self.update_count = 0
# Create network.
self.net = QNet(self.state_frames,
self.frame_height,
self.frame_width,
len(actions),
self.learning_rate)
# Restore weights if needed.
if self.weight_restore_path:
            try:
                self.__restore()
            except Exception:
                pass  # fall back to freshly initialized weights
if self.log_path:
open(self.log_path, 'w').close()
# Store all previous transitions in a deque to allow for efficient
# popping from the front and to allow for size management.
#
# Transitions are dictionaries of the following form:
# {
# 'state_in': The Q-network input state of this instance.
# 'action': The action index (indices) taken at this frame.
# 'reward': The reward from this action.
# 'terminal': True if the action led to a terminal state.
# 'state_out': The state resulting from the transition action and initial state.
# }
self.transitions = deque(maxlen=self.replay_max_size)
def __normalize_frame(self, frame):
"""
Normalizes the screen array to be 84x84x1, with floating point values in
the range [0, 1].
Args:
frame: The pixel values from the screen.
Returns:
An 84x84x1 floating point numpy array.
"""
return np.reshape(
np.mean(imresize(frame, (self.frame_height, self.frame_width)), axis=2),
(self.frame_height, self.frame_width, 1))
def __preprocess(self, frame):
"""
Resize image, pool across color channels, and normalize pixels.
Args:
frame: The frame to process.
Returns:
The preprocessed frame.
"""
proc_frame = self.__normalize_frame(frame)
if not len(self.transitions):
return np.repeat(proc_frame, self.state_frames, axis=2)
else:
return np.concatenate(
(proc_frame, self.transitions[-1]['state_in'][:, :, -(self.state_frames-1):]),
axis=2)
def __remember_transition(self, pre_frame, action, terminal):
"""
Returns the transition dictionary for the given data. Defer recording the
reward and resulting state until they are observed.
Args:
pre_frame: The frame at the current time.
action: The index of the action(s) taken at current time.
terminal: True if the action at current time led to episode termination.
"""
self.transitions.append({
'state_in': pre_frame,
'action': self.actions.index(action),
'terminal': terminal})
def __observe_result(self, resulting_state, reward):
"""
Records the resulting state and reward from the previous action.
Args:
resulting_state: The (preprocessed) state resulting from the previous action.
reward: The reward from the previous transition.
"""
if not len(self.transitions):
return
self.transitions[-1]['reward'] = reward
self.transitions[-1]['state_out'] = resulting_state
def __is_burning_in(self):
"""
        Returns true if the network is still burning in (observing transitions).
        """
return self.iteration < self.burn_in_duration
def __do_explore(self):
"""
Returns true if a random action should be taken, false otherwise.
Decays the exploration rate if the final exploration frame has not been reached.
"""
if not self.__is_burning_in() and self.exploration_rate > self.exploration_end_rate:
self.exploration_rate = max(self.exploration_end_rate, (self.exploration_duration - self.update_count) / (self.exploration_duration))
return random.random() < self.exploration_rate or self.__is_burning_in()
def __best_action(self, frame):
"""
Returns the best action to perform.
Args:
frame: The current (preprocessed) frame.
"""
return self.actions[np.argmax(self.net.compute_q(frame))]
def __random_action(self):
"""
Returns a random action to perform.
"""
return self.actions[int(random.random() * len(self.actions))]
def __compute_target_reward(self, trans):
"""
Computes the target reward for the given transition.
Args:
trans: The transition for which to compute the target reward.
Returns:
The target reward.
"""
target_reward = trans['reward']
if not trans['terminal']:
target_reward += self.discount_rate * np.amax(self.net.compute_q(trans['state_out']))
return target_reward
def step(self,
frame,
reward,
terminal,
score_ratio=None):
"""
Steps the training algorithm given the current frame and previous reward.
Assumes that the reward is a consequence of the previous action.
Args:
frame: Current game frame.
reward: Reward value from previous action.
terminal: True if the previous action was termnial.
Returns:
The next action to perform.
"""
self.iteration += 1
# Log if necessary.
if self.iteration % self.log_frequency == 0:
self.__log_status(score_ratio)
# Repeat previous action for some number of iterations.
# If we ARE repeating an action, we pretend that we did not see
# this frame and just keep doing what we're doing.
if self.iteration % self.action_repeat != 0:
self.repeating_action_rewards += reward
return [self.transitions[-1]['action']]
# Observe the previous reward.
proc_frame = self.__preprocess(frame)
self.__observe_result(proc_frame, self.repeating_action_rewards)
if self.training:
# Save network if necessary before updating.
if self.weight_save_path and self.iteration % self.weight_save_frequency == 0 and self.iteration > 0:
self.__save()
# If not burning in, update the network.
if not self.__is_burning_in() and self.actions_taken % self.update_frequency == 0:
self.update_count += 1
# Update network from the previous action.
minibatch = random.sample(self.transitions, self.batch_size)
batch_frames = [trans['state_in'] for trans in minibatch]
batch_actions = [trans['action'] for trans in minibatch]
batch_targets = [self.__compute_target_reward(trans) for trans in minibatch]
self.net.update(batch_frames, batch_actions, batch_targets)
# Select the next action.
action = self.__random_action() if self.__do_explore() else self.__best_action(proc_frame)
self.actions_taken += 1
# Remember the action and the input frames, reward to be observed later.
self.__remember_transition(proc_frame, action, terminal)
# Reset rewards counter for each group of 4 frames.
self.repeating_action_rewards = 0
return [action]
def __log_status(self, score_ratio=None):
"""
Print the current status of the Q-DQN.
Args:
score_ratio: Score ratio given by the PyGamePlayer.
"""
print(' Iteration : %d' % self.iteration)
if self.update_count > 0:
print(' Update count : %d' % self.update_count)
if self.__is_burning_in() or len(self.transitions) < self.replay_max_size:
print(' Replay capacity : %d' % len(self.transitions))
if self.exploration_rate > self.exploration_end_rate and not self.__is_burning_in():
print(' Exploration rate: %0.20f' % self.exploration_rate)
# If we're using the network, print a sample of the output.
if not self.__is_burning_in():
print(' Sample Q output :', self.net.compute_q(self.transitions[-1]['state_in']))
if score_ratio:
print(' Score ratio : %0.20f' % score_ratio)
print('==============================================================================')
# Write to log file.
        with open(self.log_path, "a") as log_file:
            log_file.write(str(score_ratio) + '\n')
def __save(self):
"""
Save the current network parameters in the checkpoint path.
"""
self.net.saver.save(self.net.sess, self.weight_save_path, global_step=self.iteration)
def __restore(self):
"""
Restore the network from the checkpoint path.
"""
if not os.path.exists(self.weight_restore_path):
raise Exception('No such checkpoint path %s!' % self.weight_restore_path)
# Get path to weights.
path = tf.train.get_checkpoint_state(self.weight_restore_path).model_checkpoint_path
# Restore iteration number.
self.iteration = int(path[(path.rfind('-')+1):]) - 1
# Restore exploration rate.
self.exploration_rate = max(self.exploration_end_rate, (self.exploration_duration - self.iteration / self.update_frequency / self.action_repeat) / (self.exploration_duration))
# Restore network weights.
self.net.saver.restore(self.net.sess, path)
print("Network weights, exploration rate, and iteration number restored!")
|
TianyiWu96/DQN
|
src/qlearn.py
|
qlearn.py
|
py
| 11,988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "qnet.QNet",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "scipy.misc.imresize",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.get_checkpoint_state",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 320,
"usage_type": "attribute"
}
] |
71579186747
|
""" Optimizes GPST model hyperparameters via Optuna. """
import os
import time
import json
import shutil
import logging
import argparse
import tempfile
import datetime
import optuna
from train import train
from lumber import get_log
from arguments import get_args
def main() -> None:
""" Run an Optuna study. """
datestring = str(datetime.datetime.now())
datestring = datestring.replace(" ", "_")
log_path = get_log("snow")
logging.getLogger().setLevel(logging.INFO) # Setup the root logger.
logging.getLogger().addHandler(logging.FileHandler(log_path))
optuna.logging.enable_propagation() # Propagate logs to the root logger.
optuna.logging.disable_default_handler() # Stop showing logs in stderr.
study = optuna.create_study()
logging.getLogger().info("Start optimization.")
study.optimize(objective, n_trials=100)
def objective(trial: optuna.Trial) -> float:
"""
Optuna objective function. Should never be called explicitly.
Parameters
----------
trial : ``optuna.Trial``, required.
The trial with which we define our hyperparameter suggestions.
Returns
-------
loss : ``float``.
The output from the model call after the timeout value specified in ``snow.sh``.
"""
parser = argparse.ArgumentParser()
parser = get_args(parser)
args = parser.parse_args()
# Set arguments.
args.num_train_epochs = 10000
args.stationarize = False
args.normalize = False
args.seq_norm = False
args.seed = 42
args.max_grad_norm = 3
args.adam_epsilon = 7.400879524874149e-08
args.warmup_proportion = 0.0
args.sep = ","
batch_size = 64
n_positions = 30
agg_size = 1
# Commented-out trial suggestions should be placed at top of block.
# args.stationarize = trial.suggest_categorical("stationarize", [True, False])
# agg_size = trial.suggest_discrete_uniform("agg_size", 1, 40, 5)
# args.warmup_proportion = trial.suggest_uniform("warmup_proportion", 0.05, 0.4)
# batch_size = trial.suggest_discrete_uniform("train_batch_size", 4, 64, 4)
args.weight_decay = trial.suggest_loguniform("weight_decay", 0.0001, 0.01)
args.learning_rate = trial.suggest_loguniform("learning_rate", 8e-7, 5e-3)
args.train_batch_size = int(batch_size)
args.aggregation_size = int(agg_size)
logging.getLogger().info(str(args))
# Set config.
config = {}
config["initializer_range"] = 0.02
config["n_head"] = 8
config["n_embd"] = 256
config["n_layer"] = 6
config["input_dim"] = 300
config["orderbook_depth"] = 6
config["horizon"] = 30
config["modes"] = [
"bid_classification",
"bid_increase",
"bid_decrease",
"ask_classification",
"ask_increase",
"ask_decrease",
]
# Commented-out trial suggestions should be placed at top of block.
# config["n_head"] = int(trial.suggest_discrete_uniform("n_head", 4, 16, 4))
# config["n_embd"] = int(trial.suggest_discrete_uniform("n_embd", 64, 128, 8))
# config["n_layer"] = trial.suggest_int("n_layer", 4, 8)
n_positions = int(trial.suggest_discrete_uniform("n_ctx", 60, 600, 30))
config["layer_norm_epsilon"] = trial.suggest_loguniform("layer_eps", 1e-5, 1e-3)
config["resid_pdrop"] = trial.suggest_loguniform("resid_pdrop", 0.01, 0.15)
config["attn_pdrop"] = trial.suggest_loguniform("attn_pdrop", 0.1, 0.3)
config["initializer_range"] = trial.suggest_loguniform("initrange", 0.005, 0.04)
config["n_positions"] = n_positions
config["n_ctx"] = n_positions
dirpath = tempfile.mkdtemp()
config_filename = str(time.time()) + ".json"
config_filepath = os.path.join(dirpath, config_filename)
with open(config_filepath, "w") as path:
json.dump(config, path)
args.gpst_model = config_filepath
args.model_name = "optuna"
args.trial = trial
loss = train(args)
shutil.rmtree(dirpath)
return loss
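# Editor's sketch (assumed helper, not called by the study above): every
# trial.suggest_* call both samples and records a parameter, so after
# study.optimize() the best configuration can be read back directly:
def inspect_best_trial(study: optuna.Study) -> None:
    best = study.best_trial
    print("loss  :", best.value)   # lowest objective value observed
    print("params:", best.params)  # e.g. {"learning_rate": ..., "weight_decay": ...}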
if __name__ == "__main__":
main()
|
langfield/spred
|
spred/gpst/optimize.py
|
optimize.py
|
py
| 4,018 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "lumber.get_log",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "optuna.logging.enable_propagation",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "optuna.logging",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "optuna.logging.disable_default_handler",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "optuna.logging",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "optuna.create_study",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "optuna.Trial",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "arguments.get_args",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "train.train",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 119,
"usage_type": "call"
}
] |
72255300348
|
from __future__ import annotations
import json
import re
from typing import TYPE_CHECKING
import asyncpg
import discord
import pandas as pd
from tweepy.asynchronous import AsyncClient
from ..helpers import add_prefix
if TYPE_CHECKING:
from bot import Bot
async def setup_cache(bot: Bot):
prefixes = await bot.pool.fetch("SELECT * FROM guild_prefixes")
for record in prefixes:
add_prefix(bot, record["guild_id"], record["prefix"])
guild_settings = await bot.pool.fetch("SELECT * FROM guild_settings")
for guild in guild_settings:
if guild["poketwo"]:
await bot.redis.sadd("poketwo_guilds", guild["guild_id"])
if guild["auto_download"]:
await bot.redis.sadd("auto_download_channels", guild["auto_download"])
if guild["auto_reactions"]:
await bot.redis.sadd("auto_reactions", guild["guild_id"])
blacklisted = await bot.pool.fetch("SELECT snowflake FROM block_list")
for snowflake in blacklisted:
await bot.redis.sadd("block_list", snowflake["snowflake"])
afk = await bot.pool.fetch("SELECT * FROM afk")
for row in afk:
await bot.redis.sadd("afk_users", row["user_id"])
covers = await bot.pool.fetch("SELECT * FROM nsfw_covers")
for row in covers:
await bot.redis.sadd("nsfw_covers", row["album_id"])
opted_out = await bot.pool.fetch("SELECT * FROM opted_out")
for row in opted_out:
for item in row["items"]:
await bot.redis.sadd(f"opted_out:{row['user_id']}", item)
user_settings = await bot.pool.fetch("SELECT * FROM user_settings")
for row in user_settings:
if row["fm_autoreact"]:
await bot.redis.sadd("fm_autoreactions", row["user_id"])
if row["mudae_pokemon"]:
await bot.redis.sadd("mudae_pokemon_reminders", row["user_id"])
async def setup_webhooks(bot: Bot):
for name, webhook in bot.config["webhooks"].items():
bot.webhooks[name] = discord.Webhook.from_url(url=webhook, session=bot.session)
for name, webhook in bot.config["avatar_webhooks"].items():
bot.avatar_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
for name, webhook in bot.config["image_webhooks"].items():
bot.image_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
for name, webhook in bot.config["icon-webhooks"].items():
bot.icon_webhooks[name] = discord.Webhook.from_url(
url=webhook, session=bot.session
)
async def setup_pokemon(bot: Bot):
url = "https://raw.githubusercontent.com/poketwo/data/master/csv/pokemon.csv"
data = pd.read_csv(url)
pokemon = [str(p).lower() for p in data["name.en"]]
for p in pokemon:
if re.search(r"[\U00002640\U0000fe0f|\U00002642\U0000fe0f]", p):
pokemon[pokemon.index(p)] = re.sub(
"[\U00002640\U0000fe0f|\U00002642\U0000fe0f]", "", p
)
if re.search(r"[\U000000e9]", p):
pokemon[pokemon.index(p)] = re.sub("[\U000000e9]", "e", p)
bot.pokemon = pokemon
async def setup_accounts(bot: Bot):
accounts = await bot.pool.fetch("SELECT * FROM accounts")
for record in accounts:
if record["osu"]:
await bot.redis.hset(f"accounts:{record['user_id']}", "osu", record["osu"])
if record["lastfm"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "lastfm", record["lastfm"]
)
if record["steam"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "steam", record["steam"]
)
if record["roblox"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "roblox", record["roblox"]
)
if record["genshin"]:
await bot.redis.hset(
f"accounts:{record['user_id']}", "genshin", record["genshin"]
)
async def create_pool(bot: Bot, connection_url: str):
def _encode_jsonb(value):
return json.dumps(value)
def _decode_jsonb(value):
return json.loads(value)
async def init(con):
await con.set_type_codec(
"jsonb",
schema="pg_catalog",
encoder=_encode_jsonb,
decoder=_decode_jsonb,
format="text",
)
connection = await asyncpg.create_pool(connection_url, init=init)
if connection is None:
raise Exception("Failed to connect to database")
bot.pool = connection
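# Editor's sketch: with the jsonb codec registered above, dicts round-trip
# through asyncpg transparently. Table and column names below are hypothetical:
#
# async def jsonb_demo(pool: asyncpg.Pool):
#     await pool.execute("INSERT INTO demo (payload) VALUES ($1)", {"a": 1})
#     row = await pool.fetchrow("SELECT payload FROM demo LIMIT 1")
#     assert row["payload"] == {"a": 1}  # decoded back into a dict, not a JSON string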
|
LeoCx1000/fish
|
src/utils/core/startup.py
|
startup.py
|
py
| 4,587 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "bot.Bot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "bot.pool.fetch",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "helpers.add_prefix",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bot.pool.fetch",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetch",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetch",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetch",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetch",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetch",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.sadd",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "bot.Bot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "bot.config",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "bot.webhooks",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "bot.session",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "bot.config",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "bot.avatar_webhooks",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "bot.session",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "bot.config",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "bot.image_webhooks",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "bot.session",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "bot.config",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "bot.icon_webhooks",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "bot.session",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "bot.Bot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "bot.pokemon",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "bot.Bot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "bot.pool.fetch",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.hset",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.hset",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.hset",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.hset",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "bot.redis.hset",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "bot.redis",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "bot.Bot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "asyncpg.create_pool",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 139,
"usage_type": "attribute"
}
] |
31957026711
|
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, Union, Optional, TypedDict, final
from datetime import datetime
import attr
import ujson
from tomodachi.utils import helpers
from tomodachi.core.enums import ActionType
if TYPE_CHECKING:
from tomodachi.core.bot import Tomodachi
__all__ = ["Action", "ActionScheduler"]
class ReminderExtras(TypedDict):
content: str
class InfractionExtras(TypedDict):
target_id: int
reason: str
def convert_action_type(val: Any) -> ActionType:
if isinstance(val, ActionType):
return val
return ActionType(val)
def convert_extra(val: Any) -> Optional[dict]:
if val is None:
return None
if isinstance(val, dict):
return val
return ujson.loads(val)
@attr.s(slots=True, auto_attribs=True)
class Action:
id: Optional[int] = None
action_type: Optional[ActionType] = attr.ib(converter=convert_action_type, default=ActionType.REMINDER)
created_at: Optional[datetime] = attr.ib(factory=helpers.utcnow)
trigger_at: Optional[datetime] = attr.ib(factory=helpers.utcnow)
author_id: Optional[int] = None
guild_id: Optional[int] = None
channel_id: Optional[int] = None
message_id: Optional[int] = None
extra: Optional[Union[ReminderExtras, InfractionExtras]] = attr.ib(converter=convert_extra, default=None)
@final
class ActionScheduler:
def __init__(self, bot: Tomodachi):
self.bot = bot
self.cond = asyncio.Condition()
self.task = asyncio.create_task(self.dispatcher())
self.active: Optional[Action] = None
async def dispatcher(self):
async with self.cond:
action = self.active = await self.get_action()
if not action:
await self.cond.wait()
                await self.redispatch()
                return  # a fresh dispatcher task takes over; don't fall through with action=None
now = helpers.utcnow()
            if action.trigger_at >= now:
                delta = (action.trigger_at - now).total_seconds()
                await asyncio.sleep(delta)
            # Fire even when the trigger time has already passed; otherwise an
            # overdue action would never be deleted and would be re-fetched forever.
            await self.trigger_action(action)
            await self.redispatch()
async def redispatch(self):
        if not (self.task.cancelled() or self.task.done()):
self.task.cancel()
self.task = asyncio.create_task(self.dispatcher())
async with self.cond:
self.cond.notify_all()
async def get_action(self):
async with self.bot.db.pool.acquire() as conn:
query = """SELECT *
FROM actions
WHERE (CURRENT_TIMESTAMP + '28 days'::interval) > actions.trigger_at
ORDER BY actions.trigger_at
LIMIT 1;"""
stmt = await conn.prepare(query)
record = await stmt.fetchrow()
if not record:
return None
return Action(**record)
async def schedule(self, a: Action):
now = helpers.utcnow()
delta = (a.trigger_at - now).total_seconds()
if delta <= 60 and a.action_type is not ActionType.INFRACTION:
asyncio.create_task(self.trigger_short_action(delta, a))
return a
async with self.bot.db.pool.acquire() as conn:
await conn.set_type_codec("jsonb", encoder=ujson.dumps, decoder=ujson.loads, schema="pg_catalog")
query = """INSERT INTO actions (action_type, trigger_at, author_id, guild_id, channel_id, message_id, extra)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING *;"""
stmt = await conn.prepare(query)
record = await stmt.fetchrow(
a.action_type.name,
a.trigger_at,
a.author_id,
a.guild_id,
a.channel_id,
a.message_id,
a.extra,
)
a = Action(**record)
        # Once the new action is created, the dispatcher has to be restarted,
        # but only if the currently active action triggers later than the new one.
if (self.active and self.active.trigger_at >= a.trigger_at) or self.active is None:
asyncio.create_task(self.redispatch())
return a
async def trigger_action(self, action: Action):
if action.action_type is ActionType.INFRACTION:
infraction = await self.bot.infractions.get_by_action(action.id)
self.bot.dispatch("expired_infraction", infraction=infraction)
else:
self.bot.dispatch("triggered_action", action=action)
await self.bot.db.pool.execute("DELETE FROM actions WHERE id = $1;", action.id)
async def trigger_short_action(self, seconds, action: Action):
await asyncio.sleep(seconds)
self.bot.dispatch("triggered_action", action=action)
|
httpolar/tomodachi
|
tomodachi/core/actions.py
|
actions.py
|
py
| 4,732 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.TypedDict",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.TypedDict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "ujson.loads",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "attr.ib",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tomodachi.core.enums.ActionType.REMINDER",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "attr.ib",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tomodachi.utils.helpers.utcnow",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tomodachi.utils.helpers",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "attr.ib",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tomodachi.utils.helpers.utcnow",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tomodachi.utils.helpers",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "attr.ib",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "attr.s",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tomodachi.core.bot.Tomodachi",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "asyncio.Condition",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "tomodachi.utils.helpers.utcnow",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tomodachi.utils.helpers",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tomodachi.utils.helpers.utcnow",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tomodachi.utils.helpers",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "tomodachi.core.enums.ActionType.INFRACTION",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "asyncio.create_task",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "ujson.loads",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "asyncio.create_task",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tomodachi.core.enums.ActionType.INFRACTION",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "tomodachi.core.enums.ActionType",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "typing.final",
"line_number": 55,
"usage_type": "name"
}
] |
14200847696
|
import discord
import asyncio
from discord.ext import commands
class Channels(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.role_bot_id = int(self.bot.config['Zone']['role_bot_id'])
self.channel_private_id = int(self.bot.config['Zone']['channel_private_id'])
self.category_private_id = int(self.bot.config['Zone']['category_private_id'])
@commands.command()
async def create(self, ctx):
guild = ctx.guild
role_bot = guild.get_role(self.role_bot_id)
category_private = guild.get_channel(self.category_private_id)
if role_bot and category_private:
if ctx.channel.id == self.channel_private_id:
if f'room-{ctx.author.name}' in [ch.name for ch in guild.text_channels]:
e_msg = discord.Embed(title=f'チャンネルは既に作成されています')
await ctx.reply(embed=e_msg, allowed_mentions=discord.AllowedMentions.none())
else:
overwrites = {
guild.default_role: discord.PermissionOverwrite(view_channel=False),
ctx.author: discord.PermissionOverwrite(view_channel=True),
role_bot: discord.PermissionOverwrite(view_channel=True)
}
channel = await guild.create_text_channel(f'room-{ctx.author.name}',
overwrites=overwrites,
category=category_private)
s_msg = discord.Embed(title='プライベートチャンネルを作成しました', description=f'チャンネル: {channel.mention}')
await ctx.reply(embed=s_msg, allowed_mentions=discord.AllowedMentions.none())
@commands.command()
async def clean(self, ctx):
guild = ctx.guild
category_private = guild.get_channel(self.category_private_id)
if category_private:
if ctx.channel.id == self.channel_private_id:
user_channel = [ch for ch in guild.text_channels if ch.name == f'room-{ctx.author.name}']
if user_channel:
                    e_msg = discord.Embed(title='チャンネルの再生成',
description="再生成する場合は`y`、キャンセルする場合は`n`を送信してください")
re_msg = await ctx.reply(embed=e_msg, allowed_mentions=discord.AllowedMentions.none())
def check(message):
if message.author == ctx.author and (message.content in ["y", "n"]):
return message.content
try:
msg = await self.bot.wait_for('message', timeout=15.0, check=check)
except asyncio.TimeoutError:
                        await re_msg.edit(embed=discord.Embed(description='時間切れです'))
                        return
if msg.content == 'y':
await msg.delete()
new_channel = await user_channel[0].clone(name=f'room-{ctx.author.name}')
await user_channel[0].delete()
await re_msg.edit(embed=discord.Embed(title='再生成しました',
description=f'チャンネル: {new_channel.mention}'))
elif msg.content == 'n':
await msg.delete()
await re_msg.edit(embed=discord.Embed(description='キャンセルしました'))
else:
pass
else:
await ctx.reply(embed=discord.Embed(description="プライベートチャンネルが見つかりません"),
allowed_mentions=discord.AllowedMentions.none())
def setup(bot):
bot.add_cog(Channels(bot))
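# Editor's sketch: the overwrites mapping used in `create` generalizes to any
# target -> PermissionOverwrite pairs; e.g. a read-only announcement channel
# (the channel name below is an assumption):
#
# overwrites = {
#     guild.default_role: discord.PermissionOverwrite(send_messages=False),
#     guild.me: discord.PermissionOverwrite(send_messages=True),
# }
# channel = await guild.create_text_channel('announcements', overwrites=overwrites)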
|
yutarou12/bot-zone
|
cogs/channels.py
|
channels.py
|
py
| 3,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions.none",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "discord.PermissionOverwrite",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "discord.PermissionOverwrite",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "discord.PermissionOverwrite",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions.none",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions.none",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "asyncio.TimeoutError",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions.none",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "discord.AllowedMentions",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 36,
"usage_type": "name"
}
] |
18091289859
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0045_auto_20150130_0558'),
]
operations = [
migrations.AlterField(
model_name='basicmemberinformation',
name='auth_key',
field=models.CharField(default='43f9a685bc7146b4ecc63bdf9bc3e5136b7543f436a42e4a2f2ae749ffb0c6db', max_length=64),
preserve_default=True,
),
]
|
hongdangodori/slehome
|
slehome/account/migrations/0046_auto_20150130_0600.py
|
0046_auto_20150130_0600.py
|
py
| 531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
}
] |
17435023939
|
# coding: utf-8
"""
Simple multithread task manager
__author_ = 'naubull2 ([email protected])'
"""
import logging
import random
import json
import time
import atexit
from queue import Queue
from threading import Thread
logger = logging.getLogger("dialog-tool")
class Worker(Thread):
"""
Thread executing tasks from a given tasks queue
"""
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
# run as daemon thread in background
self.daemon = True
self.start()
def run(self):
while True:
func, kwargs = self.tasks.get()
try:
func(**kwargs)
except Exception as e: # pylint: disable=broad-except
logger.error(f"Evaluator Error: {str(e)}")
finally:
# Mark this task as done, whether an exception happened or not
self.tasks.task_done()
class ThreadPool(object):
"""
Pool of threads consuming tasks from a queue
- add_task()
: Worker thread runs func(**kwargs)
: busy waiting for a task
- graceful_stop()
: Wait until all running jobs are done
"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads):
Worker(self.tasks)
def add_task(self, handler, **kwargs):
"""
Add a task to the queue
"""
self.tasks.put((handler, kwargs))
def graceful_stop(self):
"""
Wait for completion of all the tasks in the queue
"""
self.tasks.join()
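# Editor's note: Queue(num_threads) above is a *bounded* queue, so add_task()
# blocks once every worker is busy and the queue is full -- simple back-pressure.
# A minimal, self-contained use of the pool (assumed example):
def _pool_demo():
    pool = ThreadPool(2)
    def work(n):
        print(f"task {n} done")
    for i in range(4):
        pool.add_task(work, n=i)
    pool.graceful_stop()  # blocks until all four tasks report task_done()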
class EvaluationTaskManager(object):
"""
Class for centralized managing of new evaluation tasks
"""
def __init__(self, pool_size=5):
self.pool = ThreadPool(pool_size)
atexit.register(self.finalize)
def add_task(self, handler, **kwargs):
"""
Runs handler function with **kwargs
"""
self.pool.add_task(handler, **kwargs)
def finalize(self):
"""
Registered as 'atexit' handler
"""
logger.info("MANAGER: Waiting for all jobs to finish")
self.pool.graceful_stop() # wait until all evaluations are finished
logger.info("MANAGER: all jobs are finished")
if __name__ == "__main__":
import requests
###############################################################################
# NOTE The last task finishes last; check that the threads are joined gracefully
#
# Success in handler api1: Sup.
# Success in handler api2: Sleep tight.
# MANAGER: Waiting for all jobs to finish
# Success in handler api3: Yeah lets meet after lunch
# MANAGER: all jobs are finished
###############################################################################
task_manager = EvaluationTaskManager(pool_size=2)
def sample_handler(name, url, q):
"""make a delayed call to the given API url, print output response to the logger"""
time.sleep(random.random() * 10)
try:
ret = requests.get(url, params={"q": q}).json()
except Exception as e:
logger.error(f"Error in handler {name}: {str(e)}")
else:
logger.info(f'Success in handler {name}: {ret["output"]}')
    # Suppose localhost is running a conversation API on port 8988
task_manager.add_task(
sample_handler,
name="api1",
url="http://localhost:8988/chat",
q="Hey what's up"
)
task_manager.add_task(
sample_handler,
name="api2",
url="http://localhost:8988/chat",
q="Good night"
)
task_manager.add_task(
sample_handler,
name="api3",
url="http://localhost:8988/chat",
q="We had a lunch meeting tommorow?",
)
time.sleep(10)
|
naubull2/codingtests
|
frequent_subjects/task_manager.py
|
task_manager.py
|
py
| 3,836 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "queue.Queue",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "atexit.register",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 139,
"usage_type": "call"
}
] |
73643617788
|
from __future__ import absolute_import
import math
from collections import OrderedDict
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
import torch.utils.model_zoo as model_zoo
from .res2net import res2net50_26w_4s
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'TempoAvgPooling', 'TempoWeightedSum', 'TempoRNN']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class TempoAvgPooling(nn.Module):
""" Temporal Average Pooling """
def __init__(self, num_classes):
super(TempoAvgPooling, self).__init__()
# resnet50 = torchvision.models.resnet50(pretrained=True)
resnet50 = res2net50_26w_4s(pretrained=True)
self.backbone = nn.Sequential(*list(resnet50.children())[:-2])
self.last_layer_ch = 2048
self.classifier = nn.Linear(self.last_layer_ch, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
def forward(self, x):
"""
Args:
x: (b t 3 H W)
"""
b, t = x.size(0), x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.backbone(x) # (b*t c h w)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1).permute(0, 2, 1) # (b t c) to (b c t)
feature = F.avg_pool1d(x, t) # (b c 1)
feature = feature.view(b, self.last_layer_ch)
if not self.training:
return feature
logits = self.classifier(feature)
return logits, feature
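# Editor's sketch: the temporal pooling above, traced with assumed concrete shapes:
def _temporal_avg_pool_demo():
    b, t, c = 2, 8, 2048
    x = torch.randn(b, t, c).permute(0, 2, 1)  # (b c t), as in forward()
    f = F.avg_pool1d(x, t)                     # (b c 1): mean over the t frames
    assert f.view(b, c).shape == (b, c)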
class TempoWeightedSum(nn.Module):
def __init__(self, num_classes):
super(TempoWeightedSum, self).__init__()
resnet50 = torchvision.models.resnet50(pretrained=True)
self.backbone = nn.Sequential(*list(resnet50.children())[:-2])
self.att_gen = 'softmax' # method for attention generation: softMax or sigmoid
self.last_layer_ch = 2048 # feature dimension
self.middle_dim = 256 # middle layer dimension
self.classifier = nn.Linear(self.last_layer_ch, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
# (7,4) corresponds to (224, 112) input image size
self.spatial_attn = nn.Conv2d(self.last_layer_ch, self.middle_dim, kernel_size=[7, 4])
self.temporal_attn = nn.Conv1d(self.middle_dim, 1, kernel_size=3, padding=1)
def forward(self, x):
b, t = x.size(0), x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
featmaps = self.backbone(x) # (b*t c h w)
attn = F.relu(self.spatial_attn(featmaps)).view(b, t, -1).permute(0, 2, 1) # (b*t c 1 1) to (b t c) to (b c t)
attn = F.relu(self.temporal_attn(attn)).view(b, t) # (b 1 t) to (b t)
if self.att_gen == 'softmax':
attn = F.softmax(attn, dim=1)
elif self.att_gen == 'sigmoid':
attn = F.sigmoid(attn)
attn = F.normalize(attn, p=1, dim=1)
else:
raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
feature = F.avg_pool2d(featmaps, featmaps.size()[2:]).view(b, t, -1) # (b*t c 1 1) to (b t c)
        att_x = feature * attn.unsqueeze(-1)  # (b t c): broadcast attention weights over channels
att_x = torch.sum(att_x, dim=1)
feature = att_x.view(b, -1) # (b c)
if not self.training:
return feature
logits = self.classifier(feature)
return logits, feature
class TempoRNN(nn.Module):
def __init__(self, num_classes):
super(TempoRNN, self).__init__()
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.hidden_dim = 512
self.feat_dim = 2048
self.classifier = nn.Linear(self.hidden_dim, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
self.lstm = nn.LSTM(input_size=self.feat_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True)
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1)
output, (h_n, c_n) = self.lstm(x)
output = output.permute(0, 2, 1)
f = F.avg_pool1d(output, t)
f = f.view(b, self.hidden_dim)
if not self.training:
return f
y = self.classifier(f)
return y, f
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, conv1_ch=3, conv5_stride=1, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(conv1_ch, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=conv5_stride)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
model = resnet50()
print(model)
for block in model.layer2:
print(block)
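    # Editor's sketch: shape check for the classifier head. Note conv5_stride
    # defaults to 1 here (common in re-ID to keep spatial resolution); the 7x7
    # average pool only collapses to 1x1 with the ImageNet-style stride of 2:
    model2 = resnet50(conv5_stride=2, num_classes=10)
    out = model2(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])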
|
DeepAlchemist/video-person-reID
|
lib/model/resnet.py
|
resnet.py
|
py
| 11,011 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Conv2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "res2net.res2net50_26w_4s",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet50",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.sigmoid",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "torch.sum",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet50",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 322,
"usage_type": "name"
}
] |
71877607227
|
# Add the pyautogui library
# pip install pyautogui
import pyautogui  # dual monitors are not recognized
# Print the current mouse coordinates
# pyautogui.position()
# Move the mouse to the given coordinates
# pyautogui.moveTo(40, 154)
# Add the image-matching library
# pip install opencv-python
# Locate the on-screen area that matches the given image (output: x, y, width, height)
# pyautogui.locateOnScreen('')
# Given a region (x, y, width, height), take a screenshot of that area and save it under the given name
pyautogui.screenshot('1.png', region=(1584, 613, 30, 30))
# Get the center point of the on-screen area matching the image at the given path (output: x, y)
num1 = pyautogui.locateCenterOnScreen('1.png')
num7 = pyautogui.locateCenterOnScreen('7.png')
# Mouse click event (clicks the current mouse position if no argument is given)
pyautogui.click(num1)
pyautogui.click(num7)
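# Editor's sketch (assumed coordinates/version): depending on the pyautogui
# release, locateCenterOnScreen returns None or raises ImageNotFoundException
# when no match is found, so guard before clicking:
# if num1 is not None:
#     pyautogui.moveTo(num1, duration=0.25)  # animate the move over 0.25 s
#     pyautogui.click()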
|
BrokenMental/Python-Study
|
pyautogui.py
|
pyautogui.py
|
py
| 1,032 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyautogui.screenshot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyautogui.locateCenterOnScreen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyautogui.locateCenterOnScreen",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 26,
"usage_type": "call"
}
] |
73743939389
|
import os
from os import walk, getcwd
from PIL import Image
""" Class label (BDD) """
# same order as the YOLO-format class annotations
classes = [ "bike" , "bus" , "car", "motor", "person", "rider", "traffic light", "traffic sign", "train", "truck"]
""" Inverse convert function """
def i_convert(size, box):
x = box[0]*size[0]
y = box[1]*size[1]
w = box[2]*size[0]
h = box[3]*size[1]
xmin = x - w/2
xmax = x + w/2
ymin = y - h/2
ymax = y + h/2
return (xmin, xmax, ymin, ymax)
mypath = "./labels/100k/train/" # txt file path
wd = getcwd()
txt_outfile = open('gt_bdd_train.json', 'w')  # output json file name
txt_outfile.write("[\n")
""" Get input text file list """
txt_name_list = []
for (dirpath, dirnames, filenames) in walk(mypath):
txt_name_list.extend(filenames)
break
""" Process """
start = 0
for txt_name in txt_name_list:
""" Open input text files """
txt_path = mypath + txt_name
txt_file = open(txt_path, "r")
lines = txt_file.read().splitlines()
""" Open input image file """
img_path = txt_path.replace("labels","images")
img_path = img_path.replace("txt", "jpg")
img = Image.open(img_path)
img_size = img.size
""" Convert the YOLO format to BDD evaluation format """
for line in lines:
if(len(line) > 0):
if start != 0:
txt_outfile.write(",\n")
else :
start = 1
elems = line.split()
cls_id = int(elems[0])
x = elems[1]
y = elems[2]
w = elems[3]
h = elems[4]
box = (float(x), float(y), float(w), float(h))
xmin, xmax, ymin, ymax = i_convert(img_size, box)
txt_outfile.write("\t{\n\t\t\"name\":\"%s\",\n\t\t\"category\":\"%s\",\n\t\t\"bbox\":[%f,%f,%f,%f]\n\t}" %(os.path.splitext(txt_name)[0],classes[cls_id],xmin,ymin,xmax,ymax))
txt_outfile.write("\n]")
txt_outfile.close()
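# --- Illustrative check (added for clarity; not part of the original script) ---
# A YOLO box (cx, cy, w, h) = (0.5, 0.5, 0.5, 0.5) on a 1280x720 image maps back to
# corner coordinates (xmin, xmax, ymin, ymax) = (320.0, 960.0, 180.0, 540.0):
assert i_convert((1280, 720), (0.5, 0.5, 0.5, 0.5)) == (320.0, 960.0, 180.0, 540.0)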
|
jwchoi384/Gaussian_YOLOv3
|
bdd_evaluation/convert_txt_to_bdd_eval_json.py
|
convert_txt_to_bdd_eval_json.py
|
py
| 2,038 |
python
|
en
|
code
| 660 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
}
] |
36818284421
|
import pytest
from database import Model, ModelAttribute
pytestmark = pytest.mark.asyncio
class A(Model):
a = ModelAttribute()
b = ModelAttribute()
c = ModelAttribute()
@pytest.mark.parametrize('count', (10, 15))
async def test_insert_find(db, count):
c_true_count = 0
for i in range(count):
is_three_mod = i % 3 == 0
await db.store(A(a=i, b=i*2, c=is_three_mod))
c_true_count += is_three_mod
assert (await db.find_one(A, b=2)).a == 1
async for item in db.find(A):
assert item.a * 2 == item.b
processed = 0
limit = count // 6
async for item in db.choose(A, {'c': True}, {'c': False}, limit_=limit):
assert item.c is False
assert item.a % 3 == 0
processed += 1
assert processed == min(limit, count)
assert await db.count(A) == count
assert await db.count(A, {'c': True}) == c_true_count - processed
assert await db.count(A, {'c': False}) == count - (c_true_count - processed)
|
AzaubaevViktor/vk_grabber
|
src/database/tests/test_no_uid.py
|
test_no_uid.py
|
py
| 1,000 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "database.Model",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "database.ModelAttribute",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "database.ModelAttribute",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "database.ModelAttribute",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
}
] |
39713348458
|
from os.path import join, dirname, realpath, exists
from PIL import Image, ImageDraw, ImageFont
import numpy
import base64
from io import BytesIO
# info: image (PNG, JPG) to base64 conversion (string), learn about base64 on wikipedia https://en.wikipedia.org/wiki/Base64
def image_base64(img, img_type):
with BytesIO() as buffer:
img.save(buffer, img_type)
return base64.b64encode(buffer.getvalue()).decode()
# info: formatter preps base64 string for inclusion, ie <img src=[this return value] ... />
def image_formatter(img, img_type):
return "data:image/" + img_type + ";base64," + image_base64(img, img_type)
# text on an image
def drawFile(file, img_dict):
if exists(join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")):
print('file exists using drawn')
return join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")
else:
print('making file')
new_img = Image.open(join(dirname(realpath(__file__)), file))
d1 = ImageDraw.Draw(new_img)
font = ImageFont.truetype(join(dirname(realpath(__file__)), 'static/Roboto-MediumItalic.ttf'), 20)
d1.text((0, 0), f"{img_dict['label']}", font=font, fill=(255, 0, 0))
new_img.save(join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}"))
drawn_file = join(dirname(realpath(__file__)), f"static/design/drawn_images/{img_dict['file']}")
return drawn_file
# info: image_data prepares a series of images for data analysis
def image_data(path="static/design/", img_list=None):  # info: path of static images is defaulted
    if img_list is None:  # info: img_list falls back to these default images
img_list = [
{'source': "Katie's Phone", 'label': "Katie Hickman", 'file': "katiergb.jpg"},
{'source': "Shreya's Phone", 'label': "Shreya Ahuja", 'file': "banff.jpg"},
{'source': "Derek's Phone", 'label': "Derek Bokelman", 'file': "derekrgb.jpeg"},
{'source': "Kian's Phone", 'label': "Kian Pasokhi", 'file': "kianplane2.jpg"},
]
# info: gather analysis data and meta data for each image, adding attributes to each row in table
for img_dict in img_list:
# to fix static images
img_dict['path'] = '/' + path
file = path + img_dict['file']
print(file)
img_reference = Image.open(drawFile(file, img_dict))
img_data = img_reference.getdata() # https://www.geeksforgeeks.org/python-pil-image-getdata/
img_dict['format'] = img_reference.format
img_dict['mode'] = img_reference.mode
img_dict['size'] = img_reference.size
# info: Conversion of original Image to Base64, a string format that serves HTML nicely
img_dict['base64'] = image_formatter(img_reference, img_dict['format'])
# info: Numpy is used to allow easy access to data of image, python list
img_dict['data'] = numpy.array(img_data)
img_dict['hex_array'] = []
img_dict['binary_array'] = []
img_dict['gray_data'] = []
# info: 'data' is a list of RGB data, the list is traversed and hex and binary lists are calculated and formatted
for pixel in img_dict['data']:
# hexadecimal conversions
hex_value = hex(pixel[0])[-2:] + hex(pixel[1])[-2:] + hex(pixel[2])[-2:]
hex_value = hex_value.replace("x", "0")
img_dict['hex_array'].append("#" + hex_value)
# binary conversions
bin_value = bin(pixel[0])[2:].zfill(8) + " " + bin(pixel[1])[2:].zfill(8) + " " + bin(pixel[2])[2:].zfill(8)
img_dict['binary_array'].append(bin_value)
# info: create gray scale of image, ref: https://www.geeksforgeeks.org/convert-a-numpy-array-to-an-image/
            # note: the former second loop over img_dict['data'] was folded into the loop above to avoid a second pass over the pixels
average = (pixel[0] + pixel[1] + pixel[2]) // 3
if len(pixel) > 3:
img_dict['gray_data'].append((average, average, average, pixel[3]))
else:
img_dict['gray_data'].append((average, average, average))
# end for loop for pixel
img_reference.putdata(img_dict['gray_data'])
img_dict['base64_GRAY'] = image_formatter(img_reference, img_dict['format'])
# for hex and binary values
img_dict['hex_array_GRAY'] = []
img_dict['binary_array_GRAY'] = []
# for grayscale binary/hex changes
for pixel in img_dict['gray_data']:
# hexadecimal conversions
hex_value = hex(pixel[0])[-2:] + hex(pixel[1])[-2:] + hex(pixel[2])[-2:]
hex_value = hex_value.replace("x", "0")
img_dict['hex_array_GRAY'].append("#" + hex_value)
# binary conversions
bin_value = bin(pixel[0])[2:].zfill(8) + " " + bin(pixel[1])[2:].zfill(8) + " " + bin(pixel[2])[2:].zfill(8)
img_dict['binary_array_GRAY'].append(bin_value)
return img_list # list is returned with all the attributes for each image dictionary
# run this as standalone tester to see data printed in terminal
# if __name__ == "__main__":
# local_path = "./static/img/"
# img_test = [
# {'source': "iconsdb.com", 'label': "Blue square", 'file': "blue-square-16.png"},
# ]
# web = False
# items = image_data(local_path, img_test, web) # path of local run
# for row in items:
# # print some details about the image so you can validate that it looks like it is working
# # meta data
# print("---- meta data -----")
# print(row['label'])
# print(row['format'])
# print(row['mode'])
# print(row['size'])
# # data
# print("---- data -----")
# print(row['data'])
# print("---- gray data -----")
# print(row['gray_data'])
# print("---- hex of data -----")
# print(row['hex_array'])
# print("---- bin of data -----")
# print(row['binary_array'])
# # base64
# print("---- base64 -----")
# print(row['base64'])
# # display image
# print("---- render and write in image -----")
# filename = local_path + row['file']
# image_ref = Image.open(filename)
# draw = ImageDraw.Draw(image_ref)
# draw.text((0, 0), "Size is {0} X {1}".format(*row['size'])) # draw in image
# image_ref.show()
# print()
|
katiehickman/m224_seals
|
image.py
|
image.py
|
py
| 6,588 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "io.BytesIO",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
}
] |
6387062201
|
from jupyterthemes import install_theme, get_themes
from jupyterthemes import stylefx
def install_themes():
    themes = get_themes()
    # mf, nf and tc were previously undefined (NameError); pick one stored
    # font of each family so the font kwargs are exercised
    fonts = stylefx.stored_font_dicts('', get_all=True)
    mf, nf, tc = (list(fonts[ff])[0] for ff in ['mono', 'sans', 'serif'])
    for t in themes:
        try:
            install_theme(theme=t, monofont=mf, nbfont=nf, tcfont=tc)
        except Exception:
            return False
    return True
def install_fonts():
    # t was previously undefined (NameError); test against the first available theme
    t = get_themes()[0]
    fonts = stylefx.stored_font_dicts('', get_all=True)
    fontvals = [list(fonts[ff]) for ff in ['mono', 'sans', 'serif']]
    monotest, sanstest, seriftest = [fv[:4] for fv in fontvals]
    for i in range(4):
        mono, sans, serif = monotest[i], sanstest[i], seriftest[i]
        try:
            install_theme(theme=t, monofont=mono, nbfont=sans, tcfont=serif)
        except Exception:
            return False
        try:
            install_theme(theme=t, monofont=mono, nbfont=serif, tcfont=sans)
        except Exception:
            return False
    return True
install_themes()
install_fonts()
|
dunovank/jupyter-themes
|
tests/test_themes.py
|
test_themes.py
|
py
| 939 |
python
|
en
|
code
| 9,665 |
github-code
|
6
|
[
{
"api_name": "jupyterthemes.get_themes",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "jupyterthemes.install_theme",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "jupyterthemes.stylefx.stored_font_dicts",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "jupyterthemes.stylefx",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "jupyterthemes.install_theme",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jupyterthemes.install_theme",
"line_number": 24,
"usage_type": "call"
}
] |
31678929018
|
# import and necessary libraries
import dask.distributed
import dask.utils
import numpy as np
import planetary_computer as pc
import xarray as xr
from IPython.display import display
from pystac_client import Client
import matplotlib.pyplot as plt
import folium
from odc.stac import configure_rio, stac_load
# Function to configure the data loading process
def configure_asset():
configuration = {
"sentinel-2-l2a": { # we specify the name of the data collection
"assets": { # call the asset dictionary under the data collection and load the sub-dictionaries
"*": {"data_type": "uint16", "nodata": 0},
"SCL": {"data_type": "uint8", "nodata": 0},
"visual": {"data_type": "uint8", "nodata": 0},
},
},
"*": {"warnings": "ignore"},# applies this to all assets within the data collection
}
return configuration
# Function to manage and coordinate distributed computation using dask
def client_info():
    client = dask.distributed.Client() # create a Dask distributed client, which allows us to manage and coordinate distributed computations.
configure_rio(cloud_defaults=True, client=client)
display(client) #display client
return client
# Function to pull image data collection
def get_data_collection(client, collection, date, tile_id):
data_catalog = client # client data source
query = data_catalog.search(
collections= [collection],# call the data collection, this time we want to call the sentinel 2 data collection
datetime= date, # cloudfree date
query={"s2:mgrs_tile": dict(eq= tile_id)}, # we select a specific tile from northern parts of Ghana, 'Janga'
)
    # list the datasets found; this time we only need one
images = list(query.items())
# print the number of datasets found
print(f"Found;{len(images):d} datasets")
# we expect a single dataset since we selected a single day
return images
# Function to Lazy load entire bands in data collection
def load_dataset_with_resolution(images, configuration, resolution):
# specify the parameters
dataset = stac_load(
images, chunks={"x":2048, "y":2048},
stac_cfg=configuration, patch_url=pc.sign,
resolution=resolution,
)
# list the bands in the dataset
print(f"Bands: {','.join(list(dataset.data_vars))}")
#display the dataset
display(dataset)
return dataset
# Function to select specific bands
def select_bands(images, configuration, resolution):
dataset = stac_load(
images, bands=["red", "green", "blue", "nir", "SCL"],# select needed bands
chunks={"x":2048, "y":2048},
stac_cfg=configuration, patch_url=pc.sign,
resolution=resolution,
)
# List the selected bands
print(f"Bands: {','.join(list(dataset.data_vars))}")
# Display the dataset
display(dataset)
return dataset
# Function to convert data to float
def to_float(dataset):
dataset_float_1 = dataset.astype("float32")
nodata_1= dataset_float_1.attrs.pop("nodata", None)
if nodata_1 is None:
return dataset_float_1
return dataset_float_1.where(dataset != nodata_1)
|
Christobaltobbin/OpenDataCube
|
Scripts/odc_utils.py
|
odc_utils.py
|
py
| 3,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dask.distributed.distributed.Client",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "dask.distributed.distributed",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "dask.distributed",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "odc.stac.configure_rio",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "odc.stac.stac_load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "planetary_computer.sign",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "IPython.display.display",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "odc.stac.stac_load",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "planetary_computer.sign",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "IPython.display.display",
"line_number": 83,
"usage_type": "call"
}
] |
35257204680
|
import minerl
from minerl.data import BufferedBatchIter
import numpy as np
import random
from itertools import combinations
from actions import action_names
import cv2
import numpy as np
import torch
'''
The mineRL framework models actions as dictionaries of individual actions. Player recorded demonstration data has multiple
combinations of actions. The number of feasible combinations is too high and this would make it very hard for the agent
to generalize. Instead, we limit the agent to a smaller set of possible actions and their combinations. These basic actions
and their combinations are listed below. While training, we use frame skipping. Hence, one state is a combination of k frames
and their k actions. Action aggregation combines these k actions into one and action mapping maps this combined action to one
of the actions that the agent can perform.
'''
basic_actions = {'forward', 'back', 'left', 'right', 'attack', 'jump', 'look-left', 'look-right', 'look-up', 'look-down'}
action_combos = [{'forward', 'left'}, {'forward', 'right'}, {'forward', 'jump'}, {'forward', 'attack'}]
def get_aggregate_action(actions, cam_threshold=2.0):
'''
Function to aggregate actions from k transitions into one combined action
NOTE: Threshold is set to discount any micro-adjustments and only count camera movements for directional navigation
'''
    # Removing sprint and sneak from the actions dict
actions.pop('sneak')
actions.pop('sprint')
aggregate_action = actions
for key in aggregate_action.keys():
        # Sum up the occurrences of all actions other than the camera movement action
if not key=='camera':
aggregate_action[key] = np.sum(actions[key], axis=0)
else:
# For the camera action, instead of simply adding up movements, we compare the movement angle to a threshold
# The absolute maximum angle from one camera movement marks the direction of camera motion (l, r, u, d)
# We create a list with the camera movements from all k transitions called heading
heading = [0,0,0,0] # left, right, up, down
for i in list(actions[key]):
idx = np.argmax(np.abs(i))
if abs(i[idx]) > cam_threshold:
if idx == 0:
# Left OR Right
if i[idx] > 0:
# Left
heading[0] += 1
else:
# Right
heading[1] += 1
if idx == 1:
# Up OR Down
if i[idx] > 0:
# Up
heading[2] += 1
else:
# Down
heading[3] += 1
aggregate_action[key] = np.array(heading)
            # Set camera movement to the direction chosen most often. If several directions tie, choose one at random
max_idx = [i for i, x in enumerate(heading) if x == max(heading)]
cam_dir = random.choice(max_idx) # 0,1,2,3 corresponds to l,r,u,d
            # The 'camera' key now holds the max number of direction occurrences and the chosen direction
aggregate_action['camera'] = [max(heading) ,cam_dir]
# Popping out any action that was not chosen
noop_list = []
for key, value in aggregate_action.items():
if not key=='camera':
if value == 0:
noop_list.append(key)
else:
if value[0] == 0:
noop_list.append(key)
for key in noop_list:
aggregate_action.pop(key)
# Mapping camera directions to the movement and dropping out the 'camera' key
cam_dirs = {0:'look-left', 1:'look-right', 2:'look-up', 3:'look-down'}
if 'camera' in aggregate_action:
cam = aggregate_action.pop('camera')
aggregate_action[cam_dirs[cam[1]]] = cam[0]
# print(aggregate_action)
return aggregate_action
def map_aggregate_action(aggregate_action):
'''
Function to map an aggregate action to one of the agent's available actions
'''
# If empty then select no-operation action
if len(aggregate_action.keys()) == 0:
action = 'noop'
# If there is only one action then pick that one
elif len(aggregate_action.keys()) == 1:
if list(aggregate_action.keys())[0] in basic_actions:
action = list(aggregate_action.keys())[0]
    # If there are two actions then check if that pair is possible. Pick the pair if it is, else pick the most frequently occurring one
elif len(aggregate_action.keys()) == 2:
if set(aggregate_action.keys()) in action_combos:
action = list(aggregate_action.keys())[0] + "_" + list(aggregate_action.keys())[1]
else:
max_idx = [i for i, x in enumerate(aggregate_action.values()) if x == max(aggregate_action.values())]
action = list(aggregate_action.keys())[random.choice(max_idx)]
    # If there are more than 2 actions then check all pairs. Pick a pair with the max total occurrence count
elif len(aggregate_action.keys()) > 2:
action_pairs = list(combinations(aggregate_action.keys(), 2))
max_occurences = 0
action = None
pair_match = False
for pair in action_pairs:
if set(pair) in action_combos:
pair_match = True
if aggregate_action[pair[0]] + aggregate_action[pair[1]] > max_occurences:
max_occurences = aggregate_action[pair[0]] + aggregate_action[pair[1]]
action = pair[0] + "_" + pair[1]
if not pair_match:
max_idx = [i for i, x in enumerate(aggregate_action.values()) if x == max(aggregate_action.values())]
action = list(aggregate_action.keys())[random.choice(max_idx)]
return action
def sample_demo_batch(demo_replay_memory, batch_size, grayscale=True):
'''
    Returns batch_size transitions, each made up of frame_stack in-game transitions. One transition here has
    frame_stack in-game frames (because of frame-skipping and concatenation of observation images)
'''
# Setting up empty lists and zero arrays to store batch_size number of transitions
batch_states = []
batch_next_states = []
# if grayscale == True:
# batch_states = np.zeros((batch_size, 2, 64, 64))
# batch_next_states = np.zeros((batch_size, 2, 64, 64))
# else:
# batch_states = np.zeros((batch_size, 2, 64, 64, 3))
# batch_next_states = np.zeros((batch_size, 2, 64, 64, 3))
batch_actions = []
batch_rewards = []
batch_dones = []
# batch_actions = np.zeros((batch_size))
# batch_rewards = np.zeros((batch_size))
# batch_dones = np.zeros((batch_size))
count = 0
for current_states, actions, rewards, next_states, dones in demo_replay_memory:
if count == batch_size:
break
count +=1
# for i in range(batch_size):
# current_states, actions, rewards, next_states, dones = next(demo_replay_memory)
# Grayscale
if grayscale==True:
current_states_gray = np.zeros((current_states['pov'].shape[:-1]))
next_states_gray = np.zeros((next_states['pov'].shape[:-1]))
for j in range(current_states['pov'].shape[0]):
# current_states_gray = np.zeros((current_states['pov'].shape[:-1]))
# next_states_gray = np.zeros((next_states['pov'].shape[:-1]))
current_states_gray[j] = cv2.cvtColor(current_states['pov'][j], cv2.COLOR_BGR2GRAY)
next_states_gray[j] = cv2.cvtColor(next_states['pov'][j], cv2.COLOR_BGR2GRAY)
batch_states.append(current_states_gray)
batch_next_states.append(next_states_gray)
# batch_states[i] = current_states_gray
# batch_next_states[i] = next_states_gray
else:
batch_states.append(current_states['pov'])
batch_next_states.append(next_states['pov'])
# batch_states[i] = current_states['pov']
# batch_next_states[i] = next_states['pov']
batch_rewards.append(np.sum(rewards))
# batch_rewards[i] = np.sum(rewards)
aggregate_action = get_aggregate_action(actions)
agent_action = map_aggregate_action(aggregate_action)
action_idx = action_names[agent_action]
batch_actions.append(action_idx)
# batch_actions[i] = action_idx
if np.sum(dones) > 0:
batch_dones.append(1)
# batch_dones[i] = 1
else:
batch_dones.append(0)
# batch_dones[i] = 0
batch_states = torch.tensor(np.array(batch_states), dtype=torch.float32, requires_grad=True)
batch_next_states = torch.tensor(np.array(batch_next_states), dtype=torch.float32, requires_grad=True)
batch_actions = torch.tensor(np.array(batch_actions))
batch_rewards = torch.tensor(np.array(batch_rewards), dtype=torch.float32, requires_grad=True)
batch_dones = torch.tensor(np.array(batch_dones))
# batch_states = torch.tensor(batch_states, dtype=torch.float32, requires_grad=True)
# batch_next_states = torch.tensor(batch_next_states, dtype=torch.float32, requires_grad=True)
# batch_actions = torch.tensor(batch_actions)
# batch_rewards = torch.tensor(batch_rewards, dtype=torch.float32, requires_grad=True)
# batch_dones = torch.tensor(batch_dones)
return batch_states, batch_actions, batch_rewards, batch_next_states, batch_dones
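# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Assumes the MineRLTreechop-v0 demonstrations were downloaded beforehand with minerl.data.download.
if __name__ == "__main__":
    data = minerl.data.make('MineRLTreechop-v0')
    iterator = BufferedBatchIter(data)
    # buffered_batch_iter yields (obs, action, reward, next_obs, done) tuples with a leading frame axis
    demo_gen = iterator.buffered_batch_iter(batch_size=4, num_epochs=1)
    states, actions, rewards, next_states, dones = sample_demo_batch(demo_gen, batch_size=8)
    print(states.shape, actions.shape, rewards.shape)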
|
anishhdiwan/DQfD_Minecraft
|
demo_sampling.py
|
demo_sampling.py
|
py
| 8,704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "actions.pop",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "actions.pop",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "actions.action_names",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 215,
"usage_type": "call"
}
] |
156567587
|
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering
from sklearn.externals.joblib import Memory
from .clustering import Clustering
class AgglomerativeClustering(Clustering):
"""docstring for AgglomerativeClustering."""
def __init__(self, data, n_clusters = 2, affinity = 'euclidean',
memory = Memory(cachedir = None), connectivity = None,
compute_full_tree = 'auto', linkage = 'ward',
pooling_func = np.mean):
super(AgglomerativeClustering, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.affinity = affinity
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.pooling_func = pooling_func
def execute(self):
"""Constroi o modelo de clusterizacao."""
self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,
affinity = self.affinity,
memory = self.memory,
connectivity = self.connectivity,
compute_full_tree = self.compute_full_tree,
linkage = self.linkage,
pooling_func = self.pooling_func).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model
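# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Runs only inside the package, where the relative `.clustering` import resolves:
# data = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
# model = AgglomerativeClustering(data, n_clusters=2)
# model.execute()
# print(model.labels_)  # e.g. [1 1 1 0 0 0]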
|
netoaraujjo/hal
|
clustering/agglomerative_clustering.py
|
agglomerative_clustering.py
|
py
| 1,946 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "clustering.Clustering",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sklearn.externals.joblib.Memory",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sklearn.cluster.AgglomerativeClustering",
"line_number": 27,
"usage_type": "call"
}
] |
32638070044
|
def voto(ano):
    from datetime import datetime
    atual = datetime.now().year
    idade = atual - ano
    if 16 <= idade <= 17 or idade > 60:
        return idade, 'VOTING IS OPTIONAL!'
    elif 18 <= idade <= 60:  # fixed off-by-one: age 60 is still mandatory; only over-60s are optional
        return idade, 'VOTING IS MANDATORY!'
    else:
        return idade, 'CANNOT VOTE!'
nas = int(input('In what year were you born? '))
print(f'At {voto(nas)[0]} years old: {voto(nas)[1]}')
|
LeoWshington/Exercicios_CursoEmVideo_Python
|
ex101.py
|
ex101.py
|
py
| 396 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 3,
"usage_type": "name"
}
] |
7769213718
|
import numpy as np
import torch
import random
from PIL import Image
#---------------------------------------------------------#
#   Convert the image to RGB, to avoid errors when predicting on grayscale images.
#   The code only supports prediction on RGB images; all other image types are converted to RGB.
#---------------------------------------------------------#
def cvtColor(image):
if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:
return image
else:
image = image.convert('RGB')
return image
#---------------------------------------------------#
#   Resize the input image
#---------------------------------------------------#
def resize_image(image, size, letterbox_image):
iw, ih = image.size
w, h = size
if letterbox_image:
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
else:
new_image = image.resize((w, h), Image.BICUBIC)
return new_image
def get_num_classes(annotation_path):
with open(annotation_path) as f:
dataset_path = f.readlines()
labels = []
for path in dataset_path:
path_split = path.split(";")
labels.append(int(path_split[0]))
num_classes = np.max(labels) + 1
return num_classes
#---------------------------------------------------#
#   Get the learning rate
#---------------------------------------------------#
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def preprocess_input(image):
image /= 255.0
return image
def show_config(**kwargs):
print('Configurations:')
print('-' * 70)
print('|%25s | %40s|' % ('keys', 'values'))
print('-' * 70)
for key, value in kwargs.items():
print('|%25s | %40s|' % (str(key), str(value)))
print('-' * 70)
def random_crop(image, crop_shape, padding=None):
oshape = np.shape(image)
if padding:
oshape = (oshape[2] + 2 * padding, oshape[3] + 2 * padding)
npad = ((0, 0), (0, 0), (padding, padding), (padding, padding))
image_pad = np.lib.pad(image, pad_width=npad, mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
image_crop = image_pad[:, :, nh:nh + crop_shape[0], nw:nw + crop_shape[1]]
return image_crop
else:
print("WARNING!!! nothing to do!!!")
return image
def load_pretrained_model(net, resume_net):
print('Loading resume network...')
state_dict = torch.load(resume_net)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
def load_pretrained_model_Filter(net, state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
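# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
if __name__ == "__main__":
    batch = np.random.rand(2, 3, 64, 64)            # NCHW image batch
    crop = random_crop(batch, (56, 56), padding=4)  # pad H and W by 4, then crop 56x56
    print(crop.shape)                               # -> (2, 3, 56, 56)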
|
yangshunzhi1994/SCD
|
object verification/utils/utils.py
|
utils.py
|
py
| 3,489 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.shape",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.BICUBIC",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PIL.Image.BICUBIC",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.lib.pad",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.lib",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 102,
"usage_type": "call"
}
] |
31528905029
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# $Id: setup.py 30 2005-10-30 07:24:38Z oli $
import os, sys
from setuptools import setup, find_packages
sys.path.insert(0, 'package/lib')
from scapy import VERSION
PACKAGE_NAME = 'scapy'
DESCRIPTION="""Packet manipulation tool, packet generator, network scanner, packet sniffer, and much more."""
LONG_DESCRIPTION="""Powerful interactive packet manipulation tool, packet generator, \
network scanner, network discovery tool, and packet sniffer."""
def find_data_files():
files = [
('/usr/local/share/doc/scapy/', ['package/doc/LICENSE']),
('/usr/local/share/doc/scapy/', ['package/doc/ChangeLog']),
('/usr/local/share/doc/scapy/', ['package/doc/TODO']),
('/usr/local/bin/', ['package/usr/bin/iscapy'])
]
if os.path.exists('package/doc/scapy.info.gz'):
files.append( ('/usr/local/info/', ['package/doc/scapy.info.gz']) )
if os.path.exists('package/doc/scapy.1.gz'):
files.append( ('/usr/local/man/man1', ['package/doc/scapy.1.gz']) )
return files
setup(name=PACKAGE_NAME,
version=VERSION,
license = """GNU General Public License (GPL)""",
platforms = ['POSIX'],
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
url = "http://www.secdev.org/projects/scapy/",
download_url = "http://www.secdev.org/projects/scapy/files/scapy.py",
author = "Philippe Biondi",
author_email = "[email protected]",
classifiers = ["""Development Status :: 4 - Beta""",
"""Environment :: Console""",
"""Intended Audience :: Developers""",
"""Intended Audience :: Education""",
"""Intended Audience :: End Users/Desktop""",
"""Intended Audience :: Information Technology""",
"""Intended Audience :: Other Audience""",
"""Intended Audience :: Science/Research""",
"""Intended Audience :: System Administrators""",
"""License :: OSI Approved :: GNU General Public License (GPL)""",
"""Natural Language :: English""",
"""Operating System :: POSIX""",
"""Programming Language :: Python""",
"""Topic :: Education :: Testing""",
"""Topic :: Internet""",
"""Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator""",
"""Topic :: Security""",
"""Topic :: Software Development :: Libraries :: Python Modules""",
"""Topic :: Software Development :: Testing""",
"""Topic :: Software Development :: Testing :: Traffic Generation""",
"""Topic :: System""",
"""Topic :: System :: Networking""",
"""Topic :: System :: Networking :: Firewalls""",
"""Topic :: System :: Networking :: Monitoring"""],
package_dir = {'':'package/lib'},
py_modules = ['scapy'],
zip_safe=True,
data_files = find_data_files()
)
|
BackupTheBerlios/gruik-svn
|
trunk/projects/packaging_scapy/setup.py
|
setup.py
|
py
| 3,232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scapy.VERSION",
"line_number": 33,
"usage_type": "name"
}
] |
32661723209
|
from decimal import Decimal
from fractions import Fraction
from typing import Generator
from numeric_methods.language import TRANSLATE
from numeric_methods.language.docs.one_variable import SECANT_METHOD_DOCS
from numeric_methods.mathematics import compare, convert, widest_type
NUMBER = Decimal | float | Fraction
@TRANSLATE.documentation(SECANT_METHOD_DOCS)
def secant_method(function, x_prev: NUMBER, x: NUMBER, epsilon: NUMBER) -> Generator[tuple[NUMBER] | NUMBER, None, None]:
# Type normalization
Number = widest_type(x_prev, x, epsilon)
x_prev = convert(x_prev, Number)
x = convert(x, Number)
epsilon = convert(epsilon, Number)
step = 1
next_x = x - (x - x_prev) * function(x) / (function(x) - function(x_prev))
yield step, next_x
while not compare(abs(next_x - x), "<", epsilon):
step += 1
x_prev = x
x = next_x
next_x = x - (x - x_prev) * function(x) / (function(x) - function(x_prev))
yield step, next_x
yield next_x
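# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Approximates sqrt(2) as the positive root of f(x) = x**2 - 2:
if __name__ == "__main__":
    for value in secant_method(lambda x: x**2 - 2, 1.0, 2.0, 1e-6):
        print(value)  # (step, approximation) pairs, then the final approximation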
|
helltraitor/numeric-methods
|
numeric_methods/one_variable/secant_method.py
|
secant_method.py
|
py
| 1,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "decimal.Decimal",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "fractions.Fraction",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numeric_methods.mathematics.widest_type",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numeric_methods.mathematics.convert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numeric_methods.mathematics.convert",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numeric_methods.mathematics.convert",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numeric_methods.mathematics.compare",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numeric_methods.language.TRANSLATE.documentation",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numeric_methods.language.docs.one_variable.SECANT_METHOD_DOCS",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "numeric_methods.language.TRANSLATE",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Generator",
"line_number": 14,
"usage_type": "name"
}
] |
40687305933
|
import argparse
import json
from typing import List
from google.protobuf import json_format
from load_tests.common import (
benchmark_grpc_request,
make_full_request_type,
make_output_file_path,
)
from magma.common.service_registry import ServiceRegistry
from orc8r.protos.common_pb2 import Void
from orc8r.protos.directoryd_pb2 import (
DeleteRecordRequest,
DirectoryRecord,
GetDirectoryFieldRequest,
UpdateRecordRequest,
)
from orc8r.protos.directoryd_pb2_grpc import GatewayDirectoryServiceStub
DIRECTORYD_SERVICE_NAME = 'directoryd'
DIRECTORYD_SERVICE_RPC_PATH = 'magma.orc8r.GatewayDirectoryService'
DIRECTORYD_PORT = '127.0.0.1:50067'
PROTO_PATH = 'orc8r/protos/directoryd.proto'
def _load_subs(num_subs: int) -> List[DirectoryRecord]:
"""Load directory records"""
client = GatewayDirectoryServiceStub(
ServiceRegistry.get_rpc_channel(
DIRECTORYD_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
sids = []
for i in range(num_subs):
mac_addr = (str(i) * 2 + ":") * 5 + (str(i) * 2)
ipv4_addr = str(i) * 3 + "." + str(i) * 3 + "." + str(i) * 3 + "." + str(i) * 3
fields = {"mac-addr": mac_addr, "ipv4_addr": ipv4_addr}
sid = UpdateRecordRequest(
fields=fields,
id=str(i).zfill(15),
location=str(i).zfill(15),
)
client.UpdateRecord(sid)
sids.append(sid)
return sids
def _cleanup_subs():
"""Clear directory records"""
client = GatewayDirectoryServiceStub(
ServiceRegistry.get_rpc_channel(
DIRECTORYD_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
for record in client.GetAllDirectoryRecords(Void()).records:
sid = DeleteRecordRequest(
id=record.id,
)
client.DeleteRecord(sid)
def _build_update_records_data(num_requests: int, input_file: str):
update_record_reqs = []
for i in range(num_requests):
id = str(i).zfill(15)
location = str(i).zfill(15)
request = UpdateRecordRequest(
id=id,
location=location,
)
request_dict = json_format.MessageToDict(request)
update_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(update_record_reqs, file, separators=(',', ':'))
def _build_delete_records_data(record_list: list, input_file: str):
delete_record_reqs = []
for index, record in enumerate(record_list):
request = DeleteRecordRequest(
id=record.id,
)
request_dict = json_format.MessageToDict(request)
delete_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(delete_record_reqs, file, separators=(',', ':'))
def _build_get_record_data(record_list: list, input_file: str):
get_record_reqs = []
for index, record in enumerate(record_list):
request = GetDirectoryFieldRequest(
id=record.id,
field_key="mac-addr",
)
request_dict = json_format.MessageToDict(request)
get_record_reqs.append(request_dict)
with open(input_file, 'w') as file:
json.dump(get_record_reqs, file, separators=(',', ':'))
def _build_get_all_record_data(record_list: list, input_file: str):
request = Void()
get_all_record_reqs = json_format.MessageToDict(request)
with open(input_file, 'w') as file:
json.dump(get_all_record_reqs, file, separators=(',', ':'))
def update_record_test(args):
input_file = 'update_record.json'
_build_update_records_data(args.num_of_requests, input_file)
request_type = 'UpdateRecord'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def delete_record_test(args):
input_file = 'delete_record.json'
record_list = _load_subs(args.num_of_requests)
_build_delete_records_data(record_list, input_file)
request_type = 'DeleteRecord'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def get_record_test(args):
input_file = 'get_record.json'
record_list = _load_subs(args.num_of_requests)
_build_get_record_data(record_list, input_file)
request_type = 'GetDirectoryField'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def get_all_records_test(args):
input_file = 'get_all_records.json'
record_list = _load_subs(args.num_of_requests)
_build_get_all_record_data(record_list, input_file)
request_type = 'GetAllDirectoryRecords'
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
DIRECTORYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
        num_reqs=args.num_of_requests, address=DIRECTORYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
def create_parser():
"""
Creates the argparse subparser for all args
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
parser_update_record = subparsers.add_parser(
'update_record', help='Update record in directory',
)
parser_delete_record = subparsers.add_parser(
'delete_record', help='Delete record in directory',
)
parser_get_record = subparsers.add_parser(
'get_record', help='Get specific record in directory',
)
parser_get_all_records = subparsers.add_parser(
'get_all_records', help='Get all records in directory',
)
for subcmd in [
parser_update_record,
parser_delete_record,
parser_get_record,
parser_get_all_records,
]:
subcmd.add_argument(
'--num_of_requests', help='Number of total records in directory',
type=int, default=2000,
)
subcmd.add_argument(
'--import_path', default=None, help='Protobuf import path directory',
)
parser_update_record.set_defaults(func=update_record_test)
parser_delete_record.set_defaults(func=delete_record_test)
parser_get_record.set_defaults(func=get_record_test)
parser_get_all_records.set_defaults(func=get_all_records_test)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
# Execute the subcommand function
args.func(args)
if __name__ == "__main__":
main()
|
magma/magma
|
lte/gateway/python/load_tests/loadtest_directoryd.py
|
loadtest_directoryd.py
|
py
| 7,544 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
[
{
"api_name": "orc8r.protos.directoryd_pb2_grpc.GatewayDirectoryServiceStub",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry.get_rpc_channel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry.LOCAL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "orc8r.protos.directoryd_pb2.UpdateRecordRequest",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "orc8r.protos.directoryd_pb2.DirectoryRecord",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "orc8r.protos.directoryd_pb2_grpc.GatewayDirectoryServiceStub",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry.get_rpc_channel",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry.LOCAL",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "magma.common.service_registry.ServiceRegistry",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "orc8r.protos.common_pb2.Void",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "orc8r.protos.directoryd_pb2.DeleteRecordRequest",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "orc8r.protos.directoryd_pb2.UpdateRecordRequest",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format.MessageToDict",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "orc8r.protos.directoryd_pb2.DeleteRecordRequest",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format.MessageToDict",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "orc8r.protos.directoryd_pb2.GetDirectoryFieldRequest",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format.MessageToDict",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "orc8r.protos.common_pb2.Void",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format.MessageToDict",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "google.protobuf.json_format",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "load_tests.common.benchmark_grpc_request",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_full_request_type",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_output_file_path",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "load_tests.common.benchmark_grpc_request",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_full_request_type",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_output_file_path",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "load_tests.common.benchmark_grpc_request",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_full_request_type",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_output_file_path",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "load_tests.common.benchmark_grpc_request",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_full_request_type",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "load_tests.common.make_output_file_path",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 187,
"usage_type": "attribute"
}
] |
24274585662
|
# -----------------------------------------------------------
# Creates the views for the database.
# These views are called when the user navigates to a certain URL.
# They are responsible for either rendering an HTML template or returning the API data that were requested
# For example: Navigating to the url 'api/operations/' will trigger the OperationListCreateAPIView class
# Reference: https://docs.djangoproject.com/en/4.0/topics/class-based-views/
# -----------------------------------------------------------
import csv
import decimal
import json
import logging
import os
import shutil
import sys
import threading
import time
import zipfile
from datetime import datetime
from decimal import Decimal
import numpy as np
import open3d as o3d
import pandas as pd
import pytz
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.gis.geos import point
from django.core import serializers as core_serializers
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage
from django.db.models import Avg
from django.forms.models import model_to_dict
from django.http import (FileResponse, Http404, HttpResponse,
HttpResponseNotFound, HttpResponseRedirect,
JsonResponse)
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.urls import resolve, reverse, reverse_lazy
from django.views import View, generic
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from formtools.wizard.views import SessionWizardView
from guardian.shortcuts import assign_perm
from json2html import *
from logic import utils
from logic.algorithms.ballistic import ballistic
from logic.algorithms.build_map import (build_map_request_handler,
img_georeference)
from logic.algorithms.flying_report import flying_report
from logic.algorithms.lidar_point_cloud import lidar_points
from logic.algorithms.mission import mission_request_handler
from logic.algorithms.range_detection import range_detection
from logic.algorithms.water_collector import water_collector
from logic.algorithms.weather_station import (weather_station_ros_publisher,
weather_station_ros_subscriber)
from logic.Constants import Constants
from PIL import Image
from rest_framework import generics, permissions, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from .factories import *
from .forms import FlyingReportForm, JoinOperationForm, NewUserForm
from .models import Operation
from .permissions import IsOwnerOrReadOnly
from .serializers import *
logger = logging.getLogger(__name__)
# Function for creating Thread instances with stop function and timer function
class MyThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(MyThread, self).__init__(*args, **kwargs)
        # use a distinct name: assigning to self._stop would shadow threading.Thread._stop
        self._stop_event = threading.Event()
        self._time = 0
    def stop(self):
        self._stop_event.set()
    def stopped(self):
        return self._stop_event.is_set()
def time(self, seconds):
self._time = seconds
def get_time(self):
return self._time
class DatabaseFiller(APIView):
'''
A class that populates the database with dummy data.
It utilizes the Factory notion, using the Factory Boy library
Reference: https://factoryboy.readthedocs.io/en/stable/orms.html
'''
def get(self, request):
UserFactory.create_batch(20)
UserLogFactory.create_batch(20)
OperationFactory.create_batch(20)
mission_points = MissionPointFactory.create_batch(10)
MissionFactory.create_batch(20, mission_points=tuple(mission_points))
mission = Mission.objects.all().first()
drones = DroneFactory.create_batch(20)
DroneToOperationLogFactory.create_batch(20)
WeatherStationFactory.create_batch(50)
TelemetryFactory.create_batch(50)
LiveStreamSessionFactory.create_batch(20)
RawFrameFactory.create_batch(20)
DetectionFactory.create_batch(20)
DetectionSessionFactory.create_batch(50)
DetectionFrameFactory.create_batch(20)
DetectedObjectFactory.create_batch(20)
AlgorithmFactory.create_batch(20)
WaterSamplerFactory.create_batch(20)
ErrorMessageFactory.create_batch(20)
FrontEndUserInputFactory.create_batch(20)
LoraTransmitterFactory.create_batch(20)
LoraTransmitterLocationFactory.create_batch(20)
LidarPointSessionFactory.create_batch(20)
LidarPointFactory.create_batch(20)
BuildMapImageFactory.create_batch(50)
BuildMapSessionFactory.create_batch(20)
ControlDeviceFactory.create_batch(20)
MissionLogFactory.create_batch(20)
return redirect('login')
class OperationListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
'''
List all operations or create new one. The get and create methods are inherited,
using the generics.ListCreateAPIView.
Tutorial Reference: https://www.django-rest-framework.org/tutorial/3-class-based-views/
'''
queryset = Operation.objects.all()
serializer_class = OperationSerializer
'''
Ensure that authenticated requests get read-write access, and unauthenticated requests get read-only access
'''
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
def perform_create(self, serializer):
'''
Allows us to modify how the instance save is managed,
and handle any information that is implicit in the incoming request or requested URL.
'''
serializer.save(
operator=self.request.user) # Operations are associated with the user that created them
class DroneListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
serializer_class = DroneSerializer
def get_queryset(self):
operation_name = self.kwargs.get("operation_name")
return Drone.objects.filter(operation__operation_name=operation_name)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class DroneRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve, update (patch) or delete a drone instance
"""
queryset = Drone.objects.all()
serializer_class = DroneSerializer
lookup_field = 'drone_name'
    def get_object(self):
        drone_name = self.kwargs.get("drone_name")
        try:
            return Drone.objects.get(drone_name=drone_name)
        except Drone.DoesNotExist:
            raise Http404
def patch(self, request, *args, **kwargs):
        '''
        Partially update the attributes of a drone.
        Useful, for example, when the drone connects to or disconnects from the
        platform: we patch the "is_drone_active" field to true/false, or update
        its DroneDetection field.
        '''
        operation_name = self.kwargs.get("operation_name")
        drone_name = self.kwargs.get("drone_name")
        qs = Drone.objects.filter(
            drone_name=drone_name, operation__operation_name=operation_name)
        obj = get_object_or_404(qs)
        serializer = DroneSerializer(
            obj, data=json.loads(request.body), partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
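    # Illustrative request (editor's sketch; the URL pattern is an assumption
    # based on this view's kwargs, not taken from the project's urls.py):
    #
    #   PATCH /<operation_name>/drones/<drone_name>/
    #   {"is_drone_active": false}
    #
    # Only the supplied fields are updated, since the serializer is
    # instantiated with partial=True.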
class DetectionRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve, update (patch) or delete a detection drone instance
"""
queryset = Drone.objects.all()
serializer_class = DetectionDroneSerializer
lookup_field = 'drone_name'
def patch(self, request, *args, **kwargs):
'''
Partially update the attributes of a detection drone.
This is useful when we just want to change the detection status of the drone
'''
        operation_name = self.kwargs.get("operation_name")
        drone_name = self.kwargs.get("drone_name")
        qs = Detection.objects.filter(
            drone__drone_name=drone_name, operation__operation_name=operation_name)
        obj = get_object_or_404(qs)
        serializer = DetectionSerializer(
            obj, data=json.loads(request.body), partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MissionListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = Mission.objects.all()
serializer_class = MissionSerializer
def mission_save_to_db(missionObj, dronePK, userPK, operationPK):
serializer = MissionSerializer(data=missionObj)
if serializer.is_valid():
createdMission = serializer.save()
Drone.objects.filter(pk=dronePK).update(mission=createdMission.pk)
logger.info('Mission with id {} is created successfully.'.format(
createdMission.pk))
MissionLoggerListCreateAPIView.mission_logger_save_to_db(
'START_MISSION', createdMission, userPK, operationPK, dronePK)
return True
else:
msg = 'Mission is not valid and is not created. Error: {}.'.format(
serializer.errors)
from .consumers import ErrorMsg
ErrorMsg.set_message_and_error(logger, Drone.objects.get(
pk=dronePK).operation.operation_name, msg)
return False
class MissionLoggerListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = MissionLog.objects.all()
serializer_class = MissionLoggerSerializer
def mission_logger_save_to_db(action, mission, userPK, operationPK, dronePK):
if Mission.objects.get(pk=mission.pk).mission_type == 'SEARCH_AND_RESCUE_MISSION':
algorithm = Algorithm.objects.filter(
algorithm_name='CALCULATE_SEARCH_AND_RESCUE_MISSION_PATHS_ALGORITHM', user=userPK, operation=operationPK).last()
algorithmPK = algorithm.pk
else:
algorithmPK = None
missionLoggerData = {
'action': action,
'mission': Mission.objects.get(pk=mission.pk).pk,
'user': userPK,
'operation': operationPK,
'drone': dronePK,
'algorithm': algorithmPK
}
serializerMissionLogger = MissionLoggerSerializer(
data=missionLoggerData)
if serializerMissionLogger.is_valid():
createdMissionLogger = serializerMissionLogger.save()
logger.info('Mission Logger is saved successfully.')
else:
msg = 'Mission Logger is not valid. Error: {}.'.format(
serializerMissionLogger.errors)
from .consumers import ErrorMsg
ErrorMsg.set_message_and_error(logger, Drone.objects.get(
pk=dronePK).operation.operation_name, msg)
class MissionRetrieveAPIView(LoginRequiredMixin, generic.ListView):
model = MissionLog
template_name = 'aiders/missions.html'
queryset = MissionLog.objects.all()
success_url = reverse_lazy('home')
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
context = super(MissionRetrieveAPIView,
self).get_context_data(**kwargs)
missions = list(MissionLog.objects.filter(
action="START_MISSION", operation=operation))
missionRemoveList = []
for mission in missions:
if not list(MissionLog.objects.filter(mission=mission.mission, action="FINISH_MISSION", operation=operation)):
missionRemoveList.append(mission)
for mission in missionRemoveList:
missions.remove(mission)
context['mission_results'] = missions
context['operation_name'] = self.kwargs.get('operation_name')
# Create any data and add it to the context
return context
# Replays a recorded mission from the database back to the front end
class ReplayMissionOnlineAPIView(LoginRequiredMixin, View):
def format_time(date, prev_date=0):
edit_date = date.astimezone(pytz.timezone(
settings.TIME_ZONE)).strftime("%Y-%m-%dT%H:%M:%S.%f")
        if prev_date == edit_date[:-4]+'Z':
            edit_date = edit_date[:-5]+str(int(edit_date[-5])+1)+'Z'
else:
edit_date = edit_date[:-4]+'Z'
return edit_date
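    # Example (editor's sketch): format_time() renders a datetime in the
    # configured timezone as e.g. "2022-05-04T10:15:30.12Z" (hundredths of a
    # second) and, when the previous sample rounds to the same string, bumps
    # the last digit so consecutive keys stay unique.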
def save_data(table, time_field_name, description, save_table):
prev_date = 0
time_field_name_edit = time_field_name
for data in table:
time_field_name = time_field_name_edit
if isinstance(time_field_name, list):
for time_field in time_field_name[1:]:
data[time_field] = ReplayMissionOnlineAPIView.format_time(
data[time_field])
time_field_name = time_field_name[0]
data[time_field_name] = ReplayMissionOnlineAPIView.format_time(
data[time_field_name], prev_date)
if data[time_field_name] in save_table:
if description in save_table[data[time_field_name]]:
number = 1
while True:
if description+' '+str(number) in save_table[data[time_field_name]]:
number = number+1
else:
save_table[data[time_field_name]
][description+' '+str(number)] = data
break
else:
save_table[data[time_field_name]][description] = data
else:
save_table[data[time_field_name]] = {}
save_table[data[time_field_name]][description] = data
prev_date = data[time_field_name]
return save_table
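    # Resulting shape (editor's sketch, inferred from the code above):
    #
    #   save_table = {
    #       "2022-05-04T10:15:30.12Z": {
    #           "telemetry": {...},
    #           "telemetry 1": {...},   # numeric suffix added on key collision
    #       },
    #       ...
    #   }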
def edit_drone_data(drone_list):
for drone in drone_list:
drone['drone_name'] = Drone.objects.get(
pk=drone['drone']).drone_name
return drone_list
def get(self, request, *args, **kwargs):
replay_data = {}
time_series_data = {}
operation_name = self.kwargs.get('operation_name')
mission_id = self.kwargs.get('mission_id')
mission = Mission.objects.get(id=mission_id)
Mission_start = MissionLog.objects.filter(
mission=mission, action="START_MISSION")[0]
Mission_end = MissionLog.objects.filter(
mission=mission, action="FINISH_MISSION").last()
replay_data.update({"start_time": ReplayMissionOnlineAPIView.format_time(
Mission_start.executed_at), "end_time": ReplayMissionOnlineAPIView.format_time(Mission_end.executed_at)})
DronesInOperation = list(Telemetry.objects.filter(operation=Operation.objects.get(operation_name=operation_name), received_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values('drone').annotate(n=models.Count("pk")))
TelemetryInOperation = list(Telemetry.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), received_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
BuildMapSessionInOperation = list(BuildMapSession.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), start_time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
WeatherStationInOperation = list(WeatherStation.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), current_time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
ErrorMessageInOperation = list(ErrorMessage.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
DetectionSessionInOperation = list(DetectionSession.objects.filter(
operation=Operation.objects.get(operation_name=operation_name)).values())
AlgorithmInOperation = list(Algorithm.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), executed_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
FrontEndUserInputInOperation = list(FrontEndUserInput.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
Missions = list(Telemetry.objects.filter(operation=Operation.objects.get(operation_name=operation_name), received_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values('mission_log').annotate(n=models.Count("mission_log__mission")))
missionList = []
for current_mission_data in Missions:
            if current_mission_data['mission_log'] is not None:
mission = MissionLog.objects.get(
pk=current_mission_data['mission_log']).mission
mission_points = list(mission.mission_points.values())
for mission_point in mission_points:
for field in mission_point:
if isinstance(mission_point[field], point.Point):
mission_point[field] = [float(mission_point[field].coords[0]), float(
mission_point[field].coords[1])]
mission_object = Mission.objects.filter(
id=mission.pk).values().last()
mission_object['mission_points'] = mission_points
mission_object['executed_at'] = ReplayMissionOnlineAPIView.format_time(
mission_object['executed_at'])
mission_object['dronePK'] = MissionLog.objects.get(
pk=current_mission_data['mission_log']).drone.pk
missionList.append(mission_object)
replay_data.update({"mission_data": missionList})
replay_data.update(
{"drone_available": ReplayMissionOnlineAPIView.edit_drone_data(DronesInOperation)})
if TelemetryInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
TelemetryInOperation, 'received_at', 'telemetry', time_series_data)
if BuildMapSessionInOperation != []:
all_images = []
for session in BuildMapSessionInOperation:
BuildMapImageInOperation = list(
BuildMapImage.objects.filter(session=session['id']).values())
all_images = all_images+BuildMapImageInOperation
for image in all_images:
for field in image:
if isinstance(image[field], decimal.Decimal):
image[field] = float(image[field])
if isinstance(image[field], point.Point):
image[field] = [float(image[field].coords[0]), float(
image[field].coords[1])]
time_series_data = ReplayMissionOnlineAPIView.save_data(
all_images, 'time', 'build_map_image', time_series_data)
if DetectionSessionInOperation != []:
for session in DetectionSessionInOperation:
DetectionFrameInOperation = list(DetectionFrame.objects.filter(
detection_session=session['id'], saved_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
for frame in DetectionFrameInOperation:
frame['drone_id'] = Drone.objects.get(
id=session['drone_id']).drone_name
time_series_data = ReplayMissionOnlineAPIView.save_data(
DetectionFrameInOperation, 'saved_at', 'detection_frame', time_series_data)
DetectionObjectsInOperation = list(DetectedObject.objects.filter(
detection_session=session['id'], detected_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
for objects in DetectionObjectsInOperation:
objects['drone_id'] = Drone.objects.get(
id=session['drone_id']).drone_name
time_series_data = ReplayMissionOnlineAPIView.save_data(
DetectionObjectsInOperation, 'detected_at', 'detected_object', time_series_data)
if WeatherStationInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
WeatherStationInOperation, 'current_time', 'weather_station', time_series_data)
if AlgorithmInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
AlgorithmInOperation, 'executed_at', 'algorithm', time_series_data)
if ErrorMessageInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
ErrorMessageInOperation, 'time', 'error', time_series_data)
if FrontEndUserInputInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
FrontEndUserInputInOperation, 'time', 'user_input', time_series_data)
for drone in DronesInOperation:
RawFrameInOperation = list(RawFrame.objects.filter(
live_stream_session__drone=Drone.objects.get(drone_name=drone['drone_name']), saved_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values())
time_series_data = ReplayMissionOnlineAPIView.save_data(
RawFrameInOperation, 'saved_at', 'video_frame', time_series_data)
replay_data.update({"time_series_data": time_series_data})
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
return render(request, "aiders/replay_mission.html", {
"replay_data": replay_data,
"operation_name": operation_name,
'operation': Operation.objects.get(operation_name=operation_name),
'mission_drone': Mission_start.drone.drone_name,
'use_online_map': use_online_map
})
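    # Payload handed to the template (editor's sketch, inferred from the
    # assembly above; values are hypothetical):
    #
    #   replay_data = {
    #       "start_time": "...Z", "end_time": "...Z",
    #       "mission_data": [...],      # missions with their mission points
    #       "drone_available": [...],   # drones seen in the telemetry window
    #       "time_series_data": {...},  # per-timestamp events (see save_data)
    #   }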
class TelemetryListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = Telemetry.objects.all().order_by('-received_at')[:10]
serializer_class = TelemetrySerializer
class ControlDeviceDataAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
def control_device_save_data_to_db(jetsonObj):
try:
ControlDevice.objects.create(
drone=jetsonObj['drone'],
cpu_usage=jetsonObj['cpu_usage'],
cpu_core_usage=jetsonObj['cpu_core_usage'],
cpu_core_frequency=jetsonObj['cpu_core_frequency'],
cpu_temp=jetsonObj['cpu_temp'],
cpu_fan_RPM=jetsonObj['cpu_fan_RPM'],
gpu_usage=jetsonObj['gpu_usage'],
gpu_frequency=jetsonObj['gpu_frequency'],
gpu_temp=jetsonObj['gpu_temp'],
ram_usage=jetsonObj['ram_usage'],
swap_usage=jetsonObj['swap_usage'],
swap_cache=jetsonObj['swap_cache'],
emc_usage=jetsonObj['emc_usage'],
)
except Exception as e:
logger.error('Control Device {} Serializer data are not valid. Error: {}.'.format(
jetsonObj["drone"].drone_name, e))
class TelemetryRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
# queryset = Telemetry.objects.all().select_related('drone')
serializer_class = TelemetrySerializer
def get_object(self):
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
        '''
        The following queryset makes use of "lookups that span relationships".
        Reference: https://docs.djangoproject.com/en/1.11/topics/db/queries/#lookups-that-span-relationships
        '''
obj = Telemetry.objects.filter(drone__drone_name=drone_name).last()
if obj is None:
raise Http404
self.check_object_permissions(self.request, obj)
return obj
    def save_telemetry_in_db(telemetryObj):
        telemetryObj['water_sampler_in_water'] = water_collector.water_sampler_under_water
        serializer = TelemetrySerializer(data=telemetryObj)
        if serializer.is_valid():
            serializer.save()
        else:
            msg = 'Telemetry Serializer data are not valid. Error: {}.'.format(
                serializer.errors)
            from .consumers import ErrorMsg
            ErrorMsg.set_message_and_error(logger, Drone.objects.get(
                pk=telemetryObj['drone']).operation.operation_name, msg)
def save_error_drone_data_in_db(errorObj):
serializer = ErrorMessageSerializer(data=errorObj)
if serializer.is_valid():
serializer.save()
else:
            logger.error('Error Message Serializer data are not valid. Error: {}.'.format(
                serializer.errors))
class MissionPointsListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = MissionPoint.objects.all()
serializer_class = MissionPointSerializer
def list(self, request, *args, **kwargs):
'''
        Overriding the default list method for a special use case: listing the
        mission points of the particular mission that the specified drone is part of.
Args:
request:
*args:
**kwargs:
Returns:
'''
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
# Get the mission points for the mission that this drone is currently participating
qs = Drone.objects.filter(drone_name=drone_name, operation=Operation.objects.get(
operation_name=operation_name))
drone = get_object_or_404(qs)
mission = drone.mission
if (not mission):
raise Http404(
"This drone is not in any active missions at the moment")
mission_points = mission.mission_points.all()
queryset = self.filter_queryset(mission_points)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class UserList(LoginRequiredMixin, generics.ListAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
def get(self, request, *args, **kwargs):
users = User.objects.exclude(username="AnonymousUser")
return render(request, 'aiders/users.html', {'users': users})
class DroneList(LoginRequiredMixin, generics.ListAPIView):
queryset = Drone.objects.all()
serializer_class = DroneSerializer
def get(self, request, *args, **kwargs):
drones = Drone.objects.all()
return render(request, 'aiders/drones.html', {'drones': drones})
def save_drone_to_db(droneObj):
serializer = DroneSerializer(data=droneObj)
if serializer.is_valid():
drone = serializer.save()
logger.info('Drone Serializer id {} is saved.'.format(drone.pk))
else:
logger.error(
'Drone Serializer data are not valid. Error: {}.'.format(serializer.errors))
class UserDetail(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
class AlgorithmRetrieveView(LoginRequiredMixin, View):
queryset = Algorithm.objects.all()
serializer_class = AlgorithmSerializer
def get(self, request, *args, **kwargs):
attribute = self.kwargs.get("attr")
'''
Retrieve the algorithm with the specified id
but only the "input" or "output" attribute
'''
if attribute != "input" and attribute != "output":
return Response(status=status.HTTP_400_BAD_REQUEST)
pk = self.kwargs.get("pk")
algorithm = get_object_or_404(Algorithm.objects.filter(pk=pk))
serializer = AlgorithmSerializer(algorithm)
attr_json = serializer.data.get(attribute)
        attr_html_table = json2html.convert(json=attr_json)
        return render(request, 'aiders/algorithm_info.html', {'attr_name': attribute, 'attr_object_html_format': attr_html_table})
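        # json2html.convert (editor's note) renders the JSON attribute as an
        # HTML <table> string, roughly:
        #
        #   json2html.convert(json={"speed": 5})
        #   -> '<table border="1"><tr><th>speed</th><td>5</td></tr></table>'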
def save_algorithm_to_db(algorithmObj):
serializer = AlgorithmSerializer(data=algorithmObj)
if serializer.is_valid():
serializer.save()
logger.info('Algorithm Serializer is saved.')
else:
logger.error('Algorithm Serializer data are not valid. Error: {}.'.format(
serializer.errors))
class ManageOperationsView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
operations = Operation.objects.all()
users = User.objects.all()
return render(request, 'aiders/manage_operations.html', {'operations': operations, 'users': users, 'use_online_maps': False})
class ManagePermissionsView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to read the permissions.")
users = User.objects.exclude(username="AnonymousUser")
for user in users:
user.permission_edit_permissions = user.has_perm(
'aiders.edit_permissions')
user.permission_create_operations = user.has_perm(
'aiders.create_operations')
user.save()
operation_groups = ''
all_groups = Group.objects.all()
for group in all_groups:
            if ' operation join' in str(group.name):
                operation_groups = operation_groups + \
                    (group.name).replace(' operation join', '')+','
return render(request, 'aiders/manage_permissions.html', {'users': users, 'all_groups': operation_groups})
def post(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to change the permissions.")
for user in User.objects.exclude(username="AnonymousUser"):
User.update_permissions(user.id, 'permission_edit_permissions', str(
user.id) in request.POST.getlist('permission_edit_permissions'))
User.update_permissions(user.id, 'permission_create_operations', str(
user.id) in request.POST.getlist('permission_create_operations'))
users = User.objects.exclude(username="AnonymousUser")
for user in users:
user.permission_edit_permissions = user.has_perm(
'aiders.edit_permissions')
user.permission_create_operations = user.has_perm(
'aiders.create_operations')
user.save()
operation_groups = ''
all_groups = Group.objects.all()
for group in all_groups:
            if ' operation join' in str(group.name):
                operation_groups = operation_groups + \
                    (group.name).replace(' operation join', '')+','
return render(request, 'aiders/manage_permissions.html', {'users': users, 'all_groups': operation_groups}, status=status.HTTP_202_ACCEPTED)
class ManageUserPermissionsView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to change the permissions.")
user_name = self.kwargs.get("user_name")
group_list = request.POST.get('selected')
group_list = group_list.split(',')
for group in Group.objects.all():
            if ' operation join' in str(group.name):
User.objects.filter(username=user_name)[0].groups.remove(group)
for group_name in group_list:
group_object = Group.objects.filter(
name=group_name+" operation join").last()
User.objects.filter(username=user_name)[0].groups.add(group_object)
return HttpResponse(status=status.HTTP_200_OK)
def index(request):
'''
Triggered when the main page of the web app is loaded on browser
:param request:
'''
context = {'auth_form': AuthenticationForm}
if request.user.is_authenticated:
for drone in Drone.objects.filter(water_sampler_available=True):
p = threading.Thread(
target=water_collector.check_sensor, args=(drone.drone_name,))
p.start()
userQuery = User.objects.filter(pk=request.user.id)
user = get_object_or_404(userQuery)
joined_op_obj = user.joined_operation
if (joined_op_obj):
if request.method == 'POST':
previous_page = resolve(
request.POST.get('next', '/')).func.view_class
'''
If we got here on the main page after a POST request, that means user posted some data from a form
'''
if (previous_page == AlgorithmListView):
'''
Check if we got here after user selected to show results for some algorithms (That is, if we got here from aiders/algorithms.html)
If this is the case, save the results to the request session and then redirect again to this page
This is because if we don't redirect, the "POST" request will persist.
Reference: https://stackoverflow.com/a/49178154/15290071
'''
algorithm_result_ids = request.POST.getlist(
'checkedAlgoResultIDs')
request.session['checkedAlgoResultIDs'] = algorithm_result_ids
return HttpResponseRedirect(reverse('home'))
if (previous_page == MissionRetrieveAPIView):
mission_ids = request.POST.getlist('checkedMissionIDs')
request.session['checkedMissionIDs'] = mission_ids
return HttpResponseRedirect(reverse('home'))
elif request.method == 'GET':
context = {'operation': joined_op_obj,
'net_ip': os.environ.get("NET_IP", "localhost")}
'''
Check if there are any results to show for the algorithms
'''
                user_wants_to_load_algorithm_results_on_map = request.session.get(
                    'checkedAlgoResultIDs') is not None
                user_wants_to_load_missions_on_map = request.session.get(
                    'checkedMissionIDs') is not None
if (user_wants_to_load_algorithm_results_on_map):
algorithm_result_ids = request.session.get(
'checkedAlgoResultIDs')
try:
qs = Algorithm.objects.filter(
pk__in=algorithm_result_ids)
algorithm_results = get_list_or_404(qs)
algorithm_results = core_serializers.serialize(
'json', algorithm_results, fields=('pk', 'algorithm_name', 'output'))
context['algorithm_results'] = algorithm_results
del request.session['checkedAlgoResultIDs']
                    except Http404:
'''
Remove the algorithm results from context if the user doesn't select an algorithm
'''
context.pop("algorithm_results", None)
else:
'''
Remove the algorithm results from context if they exist.
user does not want to load any results on the map
e.g If the previous screen was the 'login' page, user just wants to log in, not to display any algorithm results
'''
context.pop("algorithm_results", None)
else:
context = {'join_operation_form': JoinOperationForm}
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
# context = {'auth_form': AuthenticationForm,'use_online_map':use_online_map}
context['use_online_map'] = use_online_map
return render(request, 'aiders/index.html', context)
return render(request, 'aiders/login.html', context)
class DroneModifyOperationView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
drone_name = self.kwargs.get("drone_name")
response = Operation.objects.filter(
drones_to_operate=Drone.objects.get(drone_name=drone_name).pk)
response = core_serializers.serialize('json', response)
drone_data = Drone.objects.get(drone_name=drone_name)
response = json.loads(response)
for data in response:
if str(data['fields']['operation_name']) == str(drone_data.operation):
data['fields'].update({'Selected': 'Selected'})
response = json.dumps(response)
return HttpResponse(response)
def post(self, request, *args, **kwargs):
operation_name = request.POST['operation_name']
drone_name = self.kwargs.get('drone_name')
drone = Drone.objects.get(drone_name=drone_name)
if operation_name == "None":
drone.operation = None
drone.save()
else:
try:
drone.operation = Operation.objects.get(
operation_name=operation_name)
drone.save()
            except Operation.DoesNotExist:
                return HttpResponseNotFound("Operation not found")
return HttpResponse(drone_name, status=status.HTTP_202_ACCEPTED)
class BuildMapAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
start_build_map = request.POST.get('start_build_map_boolean')
multispectral_build_map = request.POST.get(
'start_multispectral_build_map')
overlap = request.POST.get("overlap")
if start_build_map == 'true':
build_map_request_handler.buildMapPublisherSingleMessage(
drone_name, True, overlap)
logger.info(
'User sending build map request Start for drone {}.'.format(drone_name))
buildSessionActive = BuildMapSession.objects.filter(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name)).last()
droneActive = Drone.objects.get(
drone_name=drone_name).build_map_activated
            if buildSessionActive is None:
BuildMapSession.objects.create(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name), folder_path=Constants.BUILD_MAP_DIR_PREFIX + drone_name + "_")
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = True
drone.save()
return HttpResponse(status=status.HTTP_202_ACCEPTED)
else:
                if not buildSessionActive.is_active and not droneActive:
BuildMapSession.objects.create(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name), folder_path=Constants.BUILD_MAP_DIR_PREFIX + drone_name + "_")
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = True
drone.save()
return HttpResponse(status=status.HTTP_202_ACCEPTED)
elif start_build_map == 'false':
build_map_request_handler.buildMapPublisherSingleMessage(
drone_name, False, overlap)
logger.info(
'User sending build map request Stop for drone {}.'.format(drone_name))
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = False
drone.save()
BuildMapSession.objects.filter(operation=Operation.objects.get(operation_name=operation_name), drone=Drone.objects.get(
drone_name=drone_name), is_active=True).update(end_time=datetime.datetime.now(tz=Constants.CYPRUS_TIMEZONE_OBJ), is_active=False)
return HttpResponse(status=status.HTTP_202_ACCEPTED)
logger.error(
'Encounter an error when user send a build map request for drone {}.'.format(drone_name))
return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
class LidarPointsAPIView(LoginRequiredMixin, generic.UpdateView):
def save_point_in_db(data, dji_name, lidar_session):
        if LidarPointSession.objects.get(id=lidar_session.id).is_active:
LidarPoint.objects.create(
points=data,
lat=None,
lon=None,
drone=Drone.objects.get(drone_name=dji_name),
lidar_point_session=lidar_session
)
class BuildMapGetLastImageAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
Session = BuildMapSession.objects.filter(operation=Operation.objects.get(operation_name=operation_name), drone=Drone.objects.get(
drone_name=drone_name)).last() # operation=Operation.objects.get(operation_name=operation_name),
try:
image = Session.images.all().last()
image = model_to_dict(image)
        except Exception:
logger.error(
'Encounter an error while searching for a Build Map image for drone {}.'.format(drone_name))
return HttpResponse('', status=status.HTTP_404_NOT_FOUND)
image['top_left'] = [float(image['top_left'].coords[0]), float(
image['top_left'].coords[1])]
image['top_right'] = [float(image['top_right'].coords[0]), float(
image['top_right'].coords[1])]
image['bottom_left'] = [float(image['bottom_left'].coords[0]), float(
image['bottom_left'].coords[1])]
image['bottom_right'] = [float(image['bottom_right'].coords[0]), float(
image['bottom_right'].coords[1])]
image['centre'] = [float(image['centre'].coords[0]), float(
image['centre'].coords[1])]
image['altitude'] = float(image['altitude'])
image['bearing'] = float(image['bearing'])
logger.info(
'Found Build Map Image Successfully for drone {}.'.format(drone_name))
return HttpResponse(json.dumps(image), status=status.HTTP_202_ACCEPTED)
class BuildMapGetLastAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
buildMapSession = BuildMapSession.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name)).last()
        if buildMapSession is None:
logger.error(
'Encounter an error while getting last image from Build Map Session for drone {}.'.format(drone_name))
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
dictionary = {}
dictionary['id'] = buildMapSession.pk
dictionary['user'] = buildMapSession.user.username
dictionary['drone_id'] = buildMapSession.drone.drone_name
dictionary['start_time'] = str(
buildMapSession.start_time.date())+" "+str(buildMapSession.start_time.time())
response = json.dumps(dictionary)
logger.info(
'Found Build Map Session Successfully for drone {}.'.format(drone_name))
return HttpResponse(response, status=status.HTTP_202_ACCEPTED)
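    # Response body (editor's sketch matching the dictionary built above;
    # values are hypothetical):
    #
    #   {"id": 3, "user": "operator1", "drone_id": "drone1",
    #    "start_time": "2022-05-04 10:15:30.123456"}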
@ csrf_exempt
def BuildMapImageView(request):
if request.method == 'POST':
img_file = request.FILES.get('image_file')
img_name = request.POST.get('image_name')
drone_name = request.POST.get('drone_name')
drone_bearing = float(request.POST.get('bearing'))
drone_alt = float(request.POST.get('alt'))
drone_lat = float(request.POST.get('lat'))
drone_lon = float(request.POST.get('lon'))
extra_data = False
try:
d_roll = float(request.POST.get('d_roll'))
d_pitch = float(request.POST.get('d_pitch'))
d_yaw = float(request.POST.get('d_yaw'))
g_roll = float(request.POST.get('g_roll'))
g_pitch = float(request.POST.get('g_pitch'))
g_yaw = float(request.POST.get('g_yaw'))
extra_data = True
        except (TypeError, ValueError):
            extra_data = False
drone_instance = Drone.objects.get(drone_name=drone_name)
# if extra_data:
# # drone_bearing=drone_bearing+5
# drone_lat, drone_lon=img_georeference.high_accuracy_image_center(drone_lat, drone_lon, drone_alt, d_pitch, d_roll, drone_bearing)
destinations = img_georeference.calcPoints(
drone_lat, drone_lon, drone_bearing, drone_alt, img_name, drone_instance.model, drone_instance.camera_model)
try:
if drone_instance.is_connected_with_platform and drone_instance.build_map_activated:
Session = BuildMapSession.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last()
                Image.open(img_file)  # validates that the upload is a readable image; the result is intentionally discarded
file_name = default_storage.save(os.path.join(
Session.folder_path, img_file.name), img_file)
if extra_data:
image = BuildMapImage.objects.create(
path=Session.folder_path+'/'+img_name,
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(drone_lon, drone_lat),
altitude=Decimal(drone_alt),
bearing=Decimal(drone_bearing),
d_roll=d_roll,
d_pitch=d_pitch,
d_yaw=d_yaw,
g_roll=g_roll,
g_pitch=g_pitch,
g_yaw=g_yaw,
session=Session,
)
else:
image = BuildMapImage.objects.create(
path=Session.folder_path+'/'+img_name,
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(drone_lon, drone_lat),
altitude=Decimal(drone_alt),
bearing=Decimal(drone_bearing),
d_roll=None,
d_pitch=None,
d_yaw=None,
g_roll=None,
g_pitch=None,
g_yaw=None,
session=Session,
)
                logger.info(
                    'Saved Image Successfully for Build Map Session {}.'.format(Session.id))
                return JsonResponse({'status': 'success'}, status=status.HTTP_200_OK)
        except Exception as e:
            logger.exception(e)
            return JsonResponse({'status': 'failed'}, status=status.HTTP_400_BAD_REQUEST)
    # Fallback for non-POST requests or drones without an active build-map session
    return JsonResponse({'status': 'failed'}, status=status.HTTP_400_BAD_REQUEST)
class BuildMapLoadAPIView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
operation = Operation.objects.get(
operation_name=self.kwargs['operation_name'])
list_of_operation = list(operation.buildmapsession_set.all())
response = []
for data in list_of_operation:
dictionary = {}
dictionary['id'] = data.pk
dictionary['user'] = data.user.username
dictionary['drone_id'] = data.drone.drone_name
dictionary['start_time'] = str(
data.start_time.date())+" "+str(data.start_time.time())
dictionary['end_time'] = str(
data.end_time.date())+" " + str(data.end_time.time())
            # Check whether the session has any images
if list(BuildMapImage.objects.filter(session=data)) != []:
response.append(dictionary)
json_string = json.dumps(response)
return HttpResponse(json_string)
def post(self, request, *args, **kwargs):
try:
build_map_id = json.loads(
request.body.decode('utf-8'))['build_map_id']
        except (KeyError, ValueError):
            return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
        logger.debug(build_map_id)
map_build = list(BuildMapImage.objects.filter(
session_id=build_map_id).values())
        logger.debug(map_build)
for data in map_build:
data['time'] = str(data['time'])
data['top_left'] = [float(data['top_left'].coords[0]), float(
data['top_left'].coords[1])]
data['top_right'] = [float(data['top_right'].coords[0]), float(
data['top_right'].coords[1])]
data['bottom_left'] = [float(data['bottom_left'].coords[0]), float(
data['bottom_left'].coords[1])]
data['bottom_right'] = [float(data['bottom_right'].coords[0]), float(
data['bottom_right'].coords[1])]
data['centre'] = [float(data['centre'].coords[0]), float(
data['centre'].coords[1])]
data['altitude'] = float(data['altitude'])
data['bearing'] = float(data['bearing'])
json_string = json.dumps(map_build)
return HttpResponse(json_string, status=status.HTTP_201_CREATED)
class FirePredictionCreateAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
for jsonPostData in request:
try:
PostData = json.loads(jsonPostData)
if PostData['user']:
operation = Operation.objects.get(
operation_name=self.kwargs['operation_name'])
operationPK = operation.pk
user = User.objects.get(username=PostData['user'])
userPK = user.pk
algorithmName = 'FIRE_PROPAGATION_ALGORITHM'
canBeLoadedOnMap = True
input = PostData
del input['user']
try:
output = utils.handleAlgorithmExecution(
operationPK, input, canBeLoadedOnMap, algorithmName, userPK)
except Exception as e:
print(e)
return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
response = '['+str(output)+']'
return HttpResponse(response, status=status.HTTP_201_CREATED)
            except Exception:
                pass
raise Http404
def login_view(request):
if request.method == 'GET':
redirect_to = request.GET.get('next')
if request.user.is_authenticated:
            if redirect_to is not None:
return HttpResponseRedirect(redirect_to)
return HttpResponseRedirect(reverse('manage_operations'))
return render(request, 'aiders/login.html', {'auth_form': AuthenticationForm, 'next': redirect_to})
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
redirect_to = request.POST['next']
user = authenticate(request, username=username, password=password)
if user is not None:
if user.is_active:
if request.META.get('HTTP_X_FORWARDED_FOR'):
ip = request.META.get('HTTP_X_FORWARDED_FOR')
else:
ip = request.META.get('REMOTE_ADDR')
from user_agents import parse
user_agent = parse(request.META.get('HTTP_USER_AGENT'))
'''
When user logs in, save a few data that concern their machine
'''
terminal = Terminal(ip_address=ip, user=user,
os=user_agent.os.family,
device=user_agent.device.family,
logged_in=True,
browser=user_agent.browser.family)
terminal.save()
if not UserPreferences.objects.filter(user=user).exists():
UserPreferences.objects.create(
use_online_map=True, user=user)
login(request, user,
backend='django.contrib.auth.backends.ModelBackend')
if redirect_to != "None":
return HttpResponseRedirect(redirect_to)
return redirect('manage_operations')
else:
messages.error(request, 'Wrong username or password!')
return render(request, 'aiders/login.html', {'auth_form': AuthenticationForm, 'next': redirect_to})
def logout_view(request):
logout(request)
# Redirect to a success page
return redirect('login')
class NewOperationForm(LoginRequiredMixin, SessionWizardView):
template_name = 'aiders/operation_new_wizard.html'
    def get_form_initial(self, step):
        if not self.request.user.has_perm('aiders.create_operations'):
            raise PermissionDenied(
                "You do not have permission to create the operation.")
        return self.initial_dict.get(step, {})
def done(self, form_list, form_dict, **kwargs):
wizard_form = {k: v for form in form_list for k,
v in form.cleaned_data.items()}
operation_instance = Operation.objects.none()
wizard_form["operator"] = self.request.user
operation_instance = Operation.objects.create(
operation_name=wizard_form["operation_name"],
location=wizard_form["location"],
description=wizard_form["description"],
operator=wizard_form["operator"],
)
drone_allow_list = Drone.objects.none()
for drone_id in form_list[1].data.getlist('Drones in'):
drone_allow_list = drone_allow_list | Drone.objects.filter(
pk=drone_id)
            if form_list[1].data.getlist('drone_operation') == ['True']:
                if Drone.objects.get(pk=drone_id).operation is None or not Drone.objects.get(pk=drone_id).is_connected_with_platform:
Drone.objects.filter(pk=drone_id).update(
operation=operation_instance)
operation_instance.drones_to_operate.set(drone_allow_list)
group_join_operation = Group.objects.create(
name=operation_instance.operation_name+" operation join")
group_edit_operation = Group.objects.create(
name=operation_instance.operation_name+" operation edit")
assign_perm('join_operation', group_join_operation, operation_instance)
assign_perm('edit_operation', group_edit_operation, operation_instance)
for user_id in form_list[1].data.getlist('Users in'):
User.objects.filter(pk=user_id)[0].groups.add(group_join_operation)
wizard_form["operator"].groups.add(group_edit_operation)
logger.info('Operation with id {} is created successfully.'.format(
operation_instance.pk))
return redirect('manage_operations')
class EditOperationForm(LoginRequiredMixin, SessionWizardView):
template_name = 'aiders/operation_edit_wizard.html'
def get_form_initial(self, step):
operation_name = self.kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
if self.request.user.has_perm('edit_operation', operation):
if 'operation_name' in self.kwargs and step == '0':
operation_dict = model_to_dict(operation)
return operation_dict
else:
return self.initial_dict.get(step, {})
else:
raise PermissionDenied(
"You do not have permission to change the operation.")
def get_context_data(self, form, **kwargs):
context = super(EditOperationForm, self).get_context_data(
form=form, **kwargs)
if self.steps.current == '1':
initial = {
'users_in': [],
'users_out': [],
}
operation_name = self.kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
operation_drones_dict = model_to_dict(operation)
all_drones = list(Drone.objects.all())
for user in User.objects.all():
# Don't display the 'AnonymousUser' on the user list. We don't care about anonymous users
if not user.username == 'AnonymousUser':
if user.has_perm('join_operation', operation):
initial['users_in'].append(user)
else:
initial['users_out'].append(user)
context.update({'drones_allow': set(all_drones) & set(
operation_drones_dict['drones_to_operate'])})
context.update({'drones_all': set(all_drones) ^ set(
operation_drones_dict['drones_to_operate'])})
context.update({'users_allow': initial['users_in']})
context.update({'users_all': initial['users_out']})
context.update({'edit_form': True})
return context
def done(self, form_list, form_dict, **kwargs):
wizard_form = {k: v for form in form_list for k,
v in form.cleaned_data.items()}
drone_allow_list = Drone.objects.none()
operation_name = self.kwargs['operation_name']
operation_instance = Operation.objects.get(
operation_name=operation_name)
for drone_id in form_list[1].data.getlist('Drones in'):
drone_allow_list = drone_allow_list | Drone.objects.filter(
pk=drone_id)
            if form_list[1].data.getlist('drone_operation') == ['True']:
                if Drone.objects.get(pk=drone_id).operation is None or not Drone.objects.get(pk=drone_id).is_connected_with_platform:
Drone.objects.filter(pk=drone_id).update(
operation=operation_instance)
operation_instance.location = wizard_form['location']
operation_instance.description = wizard_form['description']
operation_instance.drones_to_operate.set(drone_allow_list)
operation_instance.save()
Group.objects.get(
name=operation_instance.operation_name+" operation join").delete()
group = Group.objects.create(
name=operation_instance.operation_name+" operation join")
assign_perm('join_operation', group, operation_instance)
for user_id in form_list[1].data.getlist('Users in'):
User.objects.filter(pk=user_id)[0].groups.add(group)
# Iterate over the drones that are NOT allowed on this operation.
# If these drones were until now joined on this operation, kick them out
notAllowedDrones = form_list[1].data.getlist('Drones out')
for dronePK in notAllowedDrones:
droneInstance = Drone.objects.get(pk=dronePK)
if droneInstance.operation == operation_instance:
Drone.objects.filter(
drone_name=droneInstance.drone_name).update(operation=None)
return redirect('manage_operations')
class ExecuteAlgorithmAPIView(LoginRequiredMixin, APIView):
def post(self, request, *args, **kwargs):
operation_name = kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
userPK = request.user.pk
operationPK = operation.pk
algorithmDetails = request.data
algorithmName = algorithmDetails['algorithmName']
input = algorithmDetails['input']
canBeLoadedOnMap = algorithmDetails['canBeLoadedOnMap']
output = utils.handleAlgorithmExecution(
operationPK, input, canBeLoadedOnMap, algorithmName, userPK)
return Response(output)
class ExecuteMissionAPIView(LoginRequiredMixin, APIView):
def get(self, request, *args, **kwargs):
operation_name = kwargs['operation_name']
drone_name = kwargs['drone_name']
user = request.user
operation = Operation.objects.get(operation_name=operation_name)
drone = Drone.objects.get(drone_name=drone_name)
mission_log = MissionLog.objects.filter(
action='START_MISSION', user=user.pk, drone=drone, operation=operation).last()
return Response(mission_log.mission.mission_type)
def post(self, request, *args, **kwargs):
# print("Request of the Execute Mission:", request, "\nand kwargs:", kwargs)
operation_name = kwargs['operation_name']
drone_name = kwargs['drone_name']
actionDetails = request.data
user_name = request.user.username
operation = Operation.objects.get(operation_name=operation_name)
action = actionDetails['action']
grid = actionDetails['grid']
captureAndStoreImages = actionDetails['captureAndStoreImages']
missionPath = actionDetails['mission_points']
dronePK = Drone.objects.get(drone_name=drone_name).pk
        missionType = actionDetails.get('mission_type')
mission_request_handler.publishMissionToRos(
operation.pk, missionType, drone_name, grid, captureAndStoreImages, missionPath, action, request.user.pk, dronePK)
return Response(status=status.HTTP_200_OK)
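    # Expected request body (editor's sketch; field names come from the
    # dictionary accesses above, example values are hypothetical):
    #
    #   {
    #       "action": "START_MISSION",
    #       "grid": false,
    #       "captureAndStoreImages": true,
    #       "mission_points": [[33.41, 35.14], ...],
    #       "mission_type": "NORMAL_MISSION"   # optional
    #   }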
class AlgorithmListView(LoginRequiredMixin, generic.ListView):
model = Algorithm
template_name = 'aiders/algorithms.html'
queryset = Algorithm.objects.all()
success_url = reverse_lazy('home')
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
# User has to join the operation in order to view the operation's algorithms
User.objects.filter(pk=self.request.user.id).update(
joined_operation=operation)
context = super(AlgorithmListView, self).get_context_data(**kwargs)
context['algorithm_results'] = operation.algorithm_set.all()
context['operation_name'] = self.kwargs.get('operation_name')
# Create any data and add it to the context
return context
@ login_required
@ csrf_protect
def stop_operation_view(request, operation_name):
if request.method == 'GET':
opQuery = Operation.objects.filter(operation_name=operation_name)
if (opQuery.exists()):
operation = get_object_or_404(opQuery)
if (operation.active):
operation.active = False
operation.save()
return redirect('manage_operations')
@ login_required
@ csrf_protect
def leave_operation_view(request):
if request.method == 'GET':
get_user_model().objects.filter(pk=request.user.id).update(joined_operation=None)
return redirect('manage_operations')
# if (userQuery.exists()):
# get_object_or_404(userQuery).update(joined_operation=None)
# user.joined_operation = None
# user.save()
# return redirect('home')
@ login_required
@ csrf_protect
def join_operation_view(request, operation_name):
if not request.user.has_perm('join_operation', Operation.objects.filter(operation_name=operation_name)[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
if request.method == 'POST':
opQuery = Operation.objects.filter(operation_name=operation_name)
if (opQuery.exists()):
operation = get_object_or_404(opQuery)
if (operation.active):
User.objects.filter(pk=request.user.id).update(
joined_operation=operation)
# get_object_or_404(user_query)
return redirect('home')
else:
raise Http404('Operation Not Found')
else:
raise Http404('Operation Not Found')
return JsonResponse({'success': False})
@ csrf_protect
def register_request(request):
if request.method == 'POST':
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
return redirect('manage_operations')
else:
form = NewUserForm()
return render(request=request, template_name='aiders/register.html', context={"register_form": form})
class DetectionAPIOperations():
@ staticmethod
def create_detection_session_on_db(user, operation, drone):
return DetectionSession.objects.create(
user=user,
operation=operation,
drone=drone
)
@ staticmethod
def save_frame_to_db(frame_file, detection_session):
detFrame = DetectionFrame.objects.create(
frame=frame_file,
detection_session=detection_session,
)
return detFrame
@ staticmethod
def update_detection_status_on_db(drone, detection_status, detection_type_str):
qs = Detection.objects.filter(drone__drone_name=drone.drone_name).update(
detection_status=detection_status, detection_type_str=detection_type_str)
@ staticmethod
def update_detection_session_end_time(detection_session):
end_time = datetime.datetime.now(tz=Constants.CYPRUS_TIMEZONE_OBJ)
DetectionSession.objects.filter(pk=detection_session.id).update(
end_time=end_time, is_active=False)
@ staticmethod
def update_latest_frame(detection_session, latest_frame_url):
DetectionSession.objects.filter(pk=detection_session.id).\
update(latest_frame_url=latest_frame_url)
@ staticmethod
def save_detected_object_to_db(detection_session, detectedObj, frame):
DetectedObject.objects.create(
track_id=detectedObj.trk_id,
label=detectedObj.label,
lat=detectedObj.lat,
lon=detectedObj.lon,
detection_session=detection_session,
distance_from_drone=detectedObj.distFromDrone,
frame=frame
)
class LiveStreamAPIOperations(LoginRequiredMixin, generics.RetrieveAPIView):
@ staticmethod
def create_live_stream_session_on_db(drone):
return LiveStreamSession.objects.create(
drone=drone
)
@ staticmethod
def save_raw_frame_to_db(frame_file, drone_name, live_stream_session):
detFrame = RawFrame.objects.create(
frame=frame_file,
drone=Drone.objects.get(drone_name=drone_name),
live_stream_session=live_stream_session,
)
return detFrame
@ staticmethod
def update_latest_raw_frame(live_stream_session, latest_frame_url):
LiveStreamSession.objects.filter(pk=live_stream_session.id).\
update(latest_frame_url=latest_frame_url)
@ api_view(['GET'])
def objects_detected_on_last_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
            active_detection_session = DetectionSession.objects.get(
                is_active=True, operation__operation_name=operation_name, drone__drone_name=drone_name)
# Get the last frame object for the active detection session
latest_frame = DetectionFrame.objects.filter(
detection_session=active_detection_session).last()
# Get the detected objects that appear on the last frame
detected_objects = DetectedObject.objects.filter(
frame=latest_frame)
except DetectionSession.DoesNotExist:
return Response({'error': Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE})
        if not detected_objects:
            return Response({'error': "No objects detected on last frame"})
serializer = DetectedObjectSerializer(detected_objects, many=True)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def last_detection_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
active_detection_session = DetectionSession.objects.get(
is_active=True, drone__drone_name=drone_name)
except DetectionSession.DoesNotExist:
return Response({'latest_frame_url': Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE})
serializer = DetectionSessionSerializer(active_detection_session)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def last_raw_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
            live_stream_session = LiveStreamSession.objects.get(
                is_active=True, drone__drone_name=drone_name)
        except LiveStreamSession.DoesNotExist:
            return Response({'latest_frame_url': Constants.NO_ACTIVE_LIVE_STREAM_SESSION_ERROR_MESSAGE})
        serializer = LiveStreamSessionSerializer(live_stream_session)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def detection_types_api_view(request, operation_name):
if request.method == 'GET':
from logic.algorithms.object_detection.src.models.label import \
get_labels_all
return Response({'detection_types': list(get_labels_all())})
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def live_stream_status_api_view(request, operation_name, drone_name):
if request.method == 'GET':
        liveStreamSession = LiveStreamSession.objects.get(
            drone__drone_name=drone_name)
        return Response({'is_live_stream_active': liveStreamSession.is_active})
return Response(status=status.HTTP_400_BAD_REQUEST)
class WeatherLiveAPIView(LoginRequiredMixin, APIView):
def post(self, request, *args, **kwargs):
ThreadRunningPub = False
ThreadRunningSub = False
threadName = []
for thread in threading.enumerate():
if thread.name == 'MainWeatherPublisher':
threadName.append(thread)
ThreadRunningPub = True
elif thread.name == 'MainWeatherSubscriber':
threadName.append(thread)
ThreadRunningSub = True
if request.data['state'] == 'true':
operation_name = self.kwargs.get('operation_name')
operation_name = operation_name.replace(' ', '~')
            if not ThreadRunningPub:
publisherThread = MyThread(name='MainWeatherPublisher', target=weather_station_ros_publisher.main, args=(
operation_name, 'MainWeatherPublisher'))
sys.argv = Constants.START_WEATHER_DATA_PUBLISHER_SCRIPT[1:]
publisherThread.start()
            if not ThreadRunningSub:
subscriberThread = MyThread(name='MainWeatherSubscriber', target=weather_station_ros_subscriber.main, args=(
operation_name, 'MainWeatherSubscriber'))
subscriberThread.start()
else:
for thread in threadName:
thread.stop()
return HttpResponse('Threads up', status=status.HTTP_200_OK)
class WeatherStationAPIView(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = WeatherStation.objects.all()
serializer_class = WeatherStationSerializer
def addWeatherStationDataToDB(data, object_name):
object_name = object_name.replace('~', ' ')
    try:
        # Fetch the Operation once and reuse it; the original re-queried the
        # table with the already-fetched object as the lookup value.
        operation = Operation.objects.get(operation_name=object_name)
        WeatherStation.objects.create(
            wind_speed=data.speed,
            wind_direction=data.direction,
            temperature=data.temperature,
            pressure=data.pressure,
            humidity=data.humidity,
            heading=data.heading,
            operation=operation,
            drone=None,
        )
    except Operation.DoesNotExist:
        try:
            drone = Drone.objects.get(drone_name=object_name)
            WeatherStation.objects.create(
                wind_speed=data.speed,
                wind_direction=data.direction,
                temperature=data.temperature,
                pressure=data.pressure,
                humidity=data.humidity,
                heading=data.heading,
                operation=None,
                drone=drone,
            )
        except Drone.DoesNotExist:
            # The sample matches neither an operation nor a drone; discard it.
            pass
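# Persists one snapshot of host metrics (CPU/GPU/RAM/network/disk/battery)
# gathered by the platform's monitoring loop.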
def system_monitoring_save_to_db(cpu_usage, cpu_core_usage, cpu_temp, gpu_usage, gpu_memory, gpu_temp, ram_usage, swap_memory_usage, temp, mb_new_sent, mb_new_received, mb_new_total, disk_read, disk_write, battery_percentage):
SystemMonitoring.objects.create(
cpu_usage=cpu_usage,
cpu_core_usage=cpu_core_usage,
cpu_temp=cpu_temp,
gpu_usage=gpu_usage,
gpu_memory=gpu_memory,
gpu_temp=gpu_temp,
ram_usage=ram_usage,
swap_memory_usage=swap_memory_usage,
temp=temp,
upload_speed=mb_new_sent,
download_speed=mb_new_received,
total_network=mb_new_total,
disk_read=disk_read,
disk_write=disk_write,
battery_percentage=battery_percentage
)
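# Lists an operation's build-map sessions, dropping sessions without images
# and attaching a shareable download URL to each remaining one.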
class buildMapSessionsAPIView(LoginRequiredMixin, generic.ListView):
model = BuildMapSession
template_name = 'aiders/buildMapSession.html'
queryset = BuildMapSession.objects.all()
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
context = super(buildMapSessionsAPIView,
self).get_context_data(**kwargs)
context['MapSession_results'] = list(
operation.buildmapsession_set.all())
index = 0
urlList = []
list_non_zero_images = list(BuildMapImage.objects.filter().values(
'session').annotate(n=models.Count("pk")))
        while index < len(context['MapSession_results']):
            element = context['MapSession_results'][index]
            save = False
            for session_non_zero_images in list_non_zero_images:
                if session_non_zero_images['session'] == element.id:
                    element.images = session_non_zero_images['n']
                    save = True
            if not save:
                # Removing shifts the remaining items left, so the index must
                # not advance here or the next session would be skipped.
                context['MapSession_results'].remove(element)
            else:
                urlList.append(self.request.build_absolute_uri(reverse(
                    'build_map_session_share', args=[self.kwargs.get('operation_name'), element.id])))
                index += 1
context['operation_name'] = self.kwargs.get('operation_name')
context['urls'] = urlList
return context
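# Exports a build-map session as a zip archive: two CSV dumps (the session row
# and its images) plus the image files themselves, staged under a temp folder
# in the default storage.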
class buildMapSessionsShareAPIView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
buildMapSessionObject = BuildMapSession.objects.get(
pk=self.kwargs.get('pk'))
fileList = []
with open('buildMapSession.csv', 'w') as csvFile:
fileWriter = csv.writer(
csvFile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(
[f.name for f in BuildMapSession._meta.get_fields()])
dataList = []
for key in [f.name for f in BuildMapSession._meta.get_fields()]:
try:
dataList.append(getattr(buildMapSessionObject, key))
except:
dataList.append("")
fileWriter.writerow(dataList)
with open('buildMapImages.csv', 'w') as csvFile2:
fileWriter = csv.writer(
csvFile2, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(
[f.name for f in BuildMapImage._meta.get_fields()])
for data in BuildMapImage.objects.filter(session=self.kwargs.get('pk')):
dataList = []
for key in [f.name for f in BuildMapImage._meta.get_fields()]:
try:
if isinstance(getattr(data, key), point.Point):
dataList.append(str(getattr(data, key).coords[0])+' '+str(
getattr(data, key).coords[1]))
else:
dataList.append(getattr(data, key))
except:
dataList.append("")
fileWriter.writerow(dataList)
try:
if not os.path.exists(default_storage.path('')+'/temp/'):
os.makedirs(default_storage.path('')+'/temp/')
else:
shutil.rmtree(default_storage.path('')+'/temp/')
os.makedirs(default_storage.path('')+'/temp/')
shutil.move('buildMapSession.csv', default_storage.path(
'')+'/temp/buildMapSession.csv')
shutil.move('buildMapImages.csv', default_storage.path(
'')+'/temp/buildMapImages.csv')
os.mkdir(default_storage.path('')+'/temp/' +
BuildMapImage.objects.filter(session=self.kwargs.get('pk')).last().path.split('/')[0])
for data in BuildMapImage.objects.filter(session=self.kwargs.get('pk')):
shutil.copyfile(default_storage.path(data.path),
default_storage.path('')+'/temp/'+data.path)
except Exception as e:
pass
try:
zip_file = zipfile.ZipFile(default_storage.path(
'build_map_session_share.zip'), 'w')
for root, dirs, files in os.walk(default_storage.path('temp')):
for f in files:
zip_file.write(os.path.join(root, f), f)
zip_file.close()
zip_file = open(default_storage.path(
'build_map_session_share.zip'), 'rb')
return FileResponse(zip_file)
except Exception as e:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
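# Triggers the drone's water-sampling payload over the message bus and records
# the action together with the drone's latest telemetry.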
class waterCollectionActivatedAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).water_sampler_available:
try:
water_collector.publish_message(drone_name, 1)
WaterSampler.objects.create(
drone=Drone.objects.get(drone_name=drone_name),
operation=Operation.objects.get(
operation_name=operation_name),
user=User.objects.get(pk=request.user.pk),
telemetry=Telemetry.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last(),
)
logger.info(
'Water sampler activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
            except Exception as e:
                logger.error(
                    'Water sampler encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Water sampler encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
        # Explicit fallback so the view never returns None when the sampler is unavailable.
        return HttpResponse('Water sampler not available for drone {}.'.format(drone_name), status=status.HTTP_400_BAD_REQUEST)
class ballisticActivatedAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).ballistic_available:
try:
ballistic.publish_message(drone_name, 1)
Ballistic.objects.create(
drone=Drone.objects.get(drone_name=drone_name),
operation=Operation.objects.get(
operation_name=operation_name),
user=User.objects.get(pk=request.user.pk),
telemetry=Telemetry.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last(),
)
logger.info(
'Ballistic activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
            except Exception as e:
                logger.error(
                    'Ballistic encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Ballistic encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
        # Explicit fallback so the view never returns None when the ballistic payload is unavailable.
        return HttpResponse('Ballistic not available for drone {}.'.format(drone_name), status=status.HTTP_400_BAD_REQUEST)
class rangeFinderAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
start_stop = request.POST.get('start_stop')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).camera_model:
try:
range_detection.buildMapPublisherSingleMessage(
drone_name, start_stop)
logger.info(
'Range Finder activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
            except Exception as e:
                logger.error(
                    'Range Finder encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Range Finder encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
        # Explicit fallback so the view never returns None when no camera model is set.
        return HttpResponse('Range Finder not available for drone {}.'.format(drone_name), status=status.HTTP_400_BAD_REQUEST)
class frontEndUserInputAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
element = request.POST.get('elementId')
value = None
if request.POST.get('active') == "true":
active = True
elif request.POST.get('active') == "false":
active = False
else:
active = True
value = request.POST.get('active')
operation_name = kwargs.get('operation_name')
try:
FrontEndUserInput.objects.create(
operation=Operation.objects.get(operation_name=operation_name),
element_name=element,
active=active,
value=value
)
            return HttpResponse('Action saved successfully.', status=status.HTTP_200_OK)
        except Exception as e:
            logger.error(e)
            return HttpResponse('Action could not be saved.', status=status.HTTP_200_OK)
class SystemMonitoringView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
if request.user.is_superuser:
return render(request, 'aiders/monitoring-platform.html', {})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
class ControlDevicesMonitoringView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
if request.user.is_superuser:
drones = Drone.objects.all()
available_drones = list(ControlDevice.objects.filter().values(
'drone').annotate(n=models.Count("pk")))
temp = []
for drones_temp in available_drones:
temp.append(Drone.objects.get(id=drones_temp['drone']))
available_drones = temp
return render(request, 'aiders/monitoring-control-devices.html', {'drones': drones, 'available_drones': available_drones})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
class ControlDeviceMonitoringView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
print(kwargs.get('control_device'))
if request.user.is_superuser:
drone_name = kwargs.get('control_device')
available_drones = list(ControlDevice.objects.filter().values(
'drone').annotate(n=models.Count("pk")))
temp = []
for drones_temp in available_drones:
temp.append(drones_temp['drone'])
available_drones = temp
if not Drone.objects.get(drone_name=drone_name).id in available_drones:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
return render(request, 'aiders/monitoring-control-device.html', {'drone_name': drone_name})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
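# Detached experiment (note the unused `self`): projects the camera footprint
# centre from drone attitude. The ground offset of a camera tilted by `angle`
# at height h is h*tan(angle); it is applied along the heading for pitch and
# 90 degrees to the left (heading+270) for roll, then turned into a lat/long
# destination with geopy's geodesic.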
def test_my_high_accuracy(self, lat, long, altitude, pitch, roll, heading):
    import math
    from geopy.distance import geodesic
    distance_pitch = altitude * math.tan(pitch*math.pi/180)  # ground offset along heading (lat)
    distance_roll = altitude * math.tan(roll*math.pi/180)  # ground offset across heading (long)
    destination_pitch = geodesic(
        kilometers=distance_pitch/1000).destination((0, 0), heading+0)
    destination_roll = geodesic(
        kilometers=distance_roll/1000).destination((0, 0), heading+270)
    newLat = lat+destination_pitch.latitude+destination_roll.latitude
    newLong = long+destination_pitch.longitude+destination_roll.longitude
    return (newLat, newLong)
class LoraTransmitterLocationRetrieveAPIView(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = LoraTransmitterLocation.objects.all()
serializer_class = LoraTransmitterLocationSerializer
lookup_field = 'tagName'
def get_object(self):
tag_name = self.kwargs.get("lora_device_name")
qs = LoraTransmitterLocation.objects.filter(
loraTransmitter__tagName=tag_name)
if not qs.exists():
raise Http404('Object not found')
return qs.last() # Return the most recent information about this lora device
class LoraTransmiterListAPIView(LoginRequiredMixin, generics.ListAPIView):
queryset = LoraTransmitter.objects.all()
serializer_class = LoraTransmitterSerializer
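# GET lists the operation's processed (meshed) lidar sessions; POST returns
# the stored .glb mesh path plus the averaged lat/lon and heading of the
# session's points so the client can place the model on the map.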
class Lidar3DMesh(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_process=True, is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
                session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
data_return = {}
data_return['id'] = mesh_id
data_return['file_path'] = 'triangle_mesh/'+str(mesh_id)+'.glb'
lat = LidarPoint.objects.filter(lidar_point_session=mesh_id).aggregate(
Avg('telemetry__lat'))
lon = LidarPoint.objects.filter(
lidar_point_session=mesh_id).aggregate(Avg('telemetry__lon'))
data_return['long'] = lon['telemetry__lon__avg']
data_return['lat'] = lat['telemetry__lat__avg']
data_return['height'] = 0
data_return['heading'] = LidarPoint.objects.filter(
lidar_point_session=mesh_id)[0].telemetry.heading
data_return = json.dumps(data_return)
return HttpResponse(data_return)
class Lidar3DPoints(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
                session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
list_lidar_points = list(LidarPoint.objects.filter(
lidar_point_session=mesh_id))
point_dict = self.point_db_to_json(list_lidar_points)
point_dict = {'data': point_dict}
lat = LidarPoint.objects.filter(lidar_point_session=mesh_id).aggregate(
Avg('telemetry__lat'))
lon = LidarPoint.objects.filter(
lidar_point_session=mesh_id).aggregate(Avg('telemetry__lon'))
point_dict['coordinates'] = [
lat['telemetry__lat__avg'], lon['telemetry__lon__avg']]
# print(point_dict['data']['0']['coordinates'])
# lidar_points.lidar_points_to_long_lat(0, 0, 0, 0, 0, 0)
point_dict = json.dumps(point_dict)
return HttpResponse(point_dict)
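    # Flattens the '|'-separated point strings of a session into a dict of
    # {id: {'coordinates': [...], 'color': [...]}}. Unlike
    # run_lidar_point_triangle below, the colors here are only shifted by
    # color_min, not normalised to [0, 1] (color_max is computed but unused).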
def point_db_to_json(self, points):
point_id = 0
point_dict = {}
list_colors = []
for point in points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
list_colors.append(int(data[3]))
list_colors.append(int(data[4]))
list_colors.append(int(data[5]))
color_max = max(list_colors)
color_min = min(list_colors)
for point in points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
data = [float(x) for x in data]
point_dict[str(point_id)] = {}
point_dict[str(point_id)]['coordinates'] = data[0:3]
point_dict[str(point_id)]['color'] = [
(int(data[3]) - color_min), (int(data[4]) - color_min), (int(data[5]) - color_min)]
point_id = point_id+1
return point_dict
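# Turns a raw lidar point session into a triangle mesh: POST meshes the chosen
# session and marks it processed, GET lists the sessions still waiting to be
# processed.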
class Lidar_process_cloud_points(LoginRequiredMixin, generics.ListAPIView):
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
mesh_object = LidarPointSession.objects.get(id=mesh_id)
works = self.run_lidar_point_triangle(mesh_object)
        if works:
mesh_object_update = LidarPointSession.objects.filter(id=mesh_id)
mesh_object_update.update(is_process=True)
            return HttpResponse(status=status.HTTP_200_OK)
        return HttpResponse(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_process=False, is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
                session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
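    # Builds an open3d point cloud from the session's points (colors
    # normalised to [0, 1]), reconstructs a surface with the alpha-shape
    # algorithm (alpha=0.1), computes the missing normals, and writes the
    # result to triangle_mesh/<session_id>.glb, replacing any previous file.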
def run_lidar_point_triangle(self, lidar_object):
list_all_points = []
list_all_color_points = []
list_of_lidar_points = list(
LidarPoint.objects.filter(lidar_point_session=lidar_object))
list_colors = []
for point in list_of_lidar_points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
list_colors.append(int(data[3]))
list_colors.append(int(data[4]))
list_colors.append(int(data[5]))
color_max = max(list_colors)
color_min = min(list_colors)
for point in list_of_lidar_points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
data = [float(x) for x in data]
list_all_points.append(
[data[0], data[1], data[2]])
list_all_color_points.append([
(int(data[3]) - color_min) / (color_max-color_min), (int(data[4]) - color_min) / (color_max-color_min), (int(data[5]) - color_min) / (color_max-color_min)])
        # np.asarray on the full lists is what open3d needs; the original
        # per-element loops only rebound the loop variable and had no effect.
        list_all_points = np.asarray(list_all_points)
        list_all_color_points = np.asarray(list_all_color_points)
try:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(list_all_points)
pcd.colors = o3d.utility.Vector3dVector(list_all_color_points)
# o3d.visualization.draw_geometries([pcd], point_show_normal=True)
alpha = 0.1
tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(
pcd)
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
pcd, alpha, tetra_mesh, pt_map)
# mesh.vertex_colors = o3d.utility.Vector3dVector(
# list_all_color_points)
if not mesh.has_vertex_normals():
mesh.compute_vertex_normals()
if not mesh.has_triangle_normals():
mesh.compute_triangle_normals()
# o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)
if not os.path.exists(default_storage.path('triangle_mesh')):
os.makedirs(default_storage.path('triangle_mesh'))
if os.path.exists(default_storage.path('triangle_mesh/'+str(lidar_object.id)+'.glb')):
os.remove(default_storage.path(
'triangle_mesh/'+str(lidar_object.id)+'.glb'))
o3d.io.write_triangle_mesh(
default_storage.path('triangle_mesh/'+str(lidar_object.id)+'.glb'), mesh)
return True
except Exception as e:
print(e)
return False
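# Flying-report generator: GET renders the form pre-filled with the
# operation's drones and their last telemetry; a valid POST produces a PDF via
# flying_report.main, stores a FlyingReport row, and streams the file back.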
class FlyingReportAPIView(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
operation_name = self.kwargs.get("operation_name")
AvailableDroneList = list(Drone.objects.filter(
operation__operation_name=operation_name).values())
listDrones = []
for drone in AvailableDroneList:
listDrones.append({"drone_name": drone['drone_name'],
"latitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lat,
"longitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lon})
return render(request, 'aiders/flying_report.html', {'list_of_drones': listDrones, 'available_drones': json.dumps(listDrones), 'operation_name': operation_name, 'form': FlyingReportForm()})
def post(self, request, *args, **kwargs):
user = request.user.username
if request.POST.get('form_selection') != 'custom':
drone = request.POST.get('form_selection')
else:
drone = 'Unknown'
operation_name = self.kwargs.get("operation_name")
form = FlyingReportForm(request.POST)
if form.is_valid():
latitude = request.POST.get('latitude')
longitude = request.POST.get('longitude')
altitude = request.POST.get('altitude')
radius = request.POST.get('radius')
buffer_altitude = request.POST.get('buffer_altitude')
buffer_radius = request.POST.get('buffer_radius')
start_date = request.POST.get('start_date_time')
end_date = request.POST.get('end_date_time')
start_date = datetime.datetime.strptime(
start_date, '%Y-%m-%dT%H:%M')
end_date = datetime.datetime.strptime(
end_date, '%Y-%m-%dT%H:%M')
path = 'daily_fly_notams/notams' + \
str(len(FlyingReport.objects.all()))+'.pdf'
flying_report.main(user, drone, operation_name, latitude, longitude, altitude, radius,
buffer_altitude, buffer_radius, start_date, end_date, path)
try:
drone = Drone.objects.get(drone_name=drone)
except Drone.DoesNotExist:
drone = None
FlyingReport.objects.create(user=request.user, drone=drone, operation=Operation.objects.get(operation_name=operation_name), latitude=latitude, longitude=longitude, altitude=altitude,
radius=radius, buffer_altitude=buffer_altitude, buffer_radius=buffer_radius, start_date_time=start_date, end_date_time=end_date, file_path=path)
response = open(default_storage.path(path), 'rb')
return FileResponse(response)
operation_name = self.kwargs.get("operation_name")
AvailableDroneList = list(Drone.objects.filter(
operation__operation_name=operation_name).values())
listDrones = []
for drone in AvailableDroneList:
listDrones.append({"drone_name": drone['drone_name'],
"latitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lat,
"longitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lon})
return render(request, 'aiders/flying_report.html', {'list_of_drones': listDrones, 'available_drones': json.dumps(listDrones), 'operation_name': operation_name, 'form': form})
class FlyingReportTableAPIView(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
operation_name = self.kwargs.get("operation_name")
fly_reports = FlyingReport.objects.filter(
operation=Operation.objects.get(operation_name=operation_name))
return render(request, 'aiders/flying_reports.html', {'flying_reports': fly_reports, 'operation_name': operation_name})
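# Stores raw drone-movement telemetry; create_data_to_db is presumably invoked
# once per incoming movement message.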
class DroneMovementAPIView(LoginRequiredMixin, generics.ListAPIView):
    def create_data_to_db(data, drone_name):
        # .get() raises DoesNotExist instead of returning None, so the original
        # `!= None` test never worked as a duplicate guard; use .exists() and
        # insert only when no matching record is stored yet. The create also
        # read `data.timestamp` while the lookup used `data.time_stamp`; the
        # lookup's spelling is kept for both.
        if not DroneMovement.objects.filter(seq=data.seq, uid=data.uid, time_stamp=data.time_stamp).exists():
            DroneMovement.objects.create(
                seq=data.seq,
                uid=data.uid,
                time_stamp=data.time_stamp,
drone=Drone.objects.get(drone_name=drone_name),
flight_logic_state=data.flight_logic_state,
wind_speed=data.wind_speed,
wind_angle=data.wind_angle,
battery_voltage=data.battery_voltage,
battery_current=data.battery_current,
position_x=data.position_x,
position_y=data.position_y,
position_z=data.position_z,
altitude=data.altitude,
orientation_x=data.orientation_x,
orientation_y=data.orientation_y,
orientation_z=data.orientation_z,
orientation_w=data.orientation_w,
velocity_x=data.velocity_x,
velocity_y=data.velocity_y,
velocity_z=data.velocity_z,
angular_x=data.angular_x,
angular_y=data.angular_y,
angular_z=data.angular_z,
linear_acceleration_x=data.linear_acceleration_x,
linear_acceleration_y=data.linear_acceleration_y,
linear_acceleration_z=data.linear_acceleration_z,
payload=data.payload,
)
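# User settings page: GET shows the stored map-mode preference, POST updates
# whether the user wants the online map tiles.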
def settings_view(request):
if request.user.is_authenticated:
if request.method == 'GET':
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
return render(request, 'aiders/settings.html', {'use_online_map': use_online_map})
elif request.method == 'POST':
selectedVal = request.POST.get('map_mode_dropdown')
            use_online_map = selectedVal == Constants.ONLINE_MAP_MODE
UserPreferences.objects.filter(user=request.user).update(
use_online_map=use_online_map)
return render(request, 'aiders/settings.html', {'use_online_map': use_online_map})
# Delete later
class TestingBuildMap(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
with open('aiders/buildmapimages_db.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
active = True
for row in spamreader:
my_list = " ".join(row).split(",")
# if my_list[1] == 'matrice300_5807/16650632878898530304.jpeg':
# active = True
# if my_list[1] == 'matrice300_5807/16650634753086040064.jpeg':
# active = False
# # print(active)
print(my_list[1])
print(my_list[1] ==
'matrice300_5807/16680725277336719360.jpeg')
if my_list[1] == 'Build_Maps_matrice300_5807_2022-11-10_11.28.46/16680725277336719360.jpeg':
if active:
newBearing = (float(my_list[8])+float(my_list[14]))/2
long_lat = my_list[6].split(" ")
long_lat[1] = float(long_lat[1])
long_lat[0] = float(long_lat[0])
# long_lat[1], long_lat[0]=self.test_my_high_accuracy(float(long_lat[1]),float(long_lat[0]), float(my_list[7]), float(my_list[10]), float(my_list[9]), newBearing)
print(long_lat[1], long_lat[0])
destinations = img_georeference.calcPoints(float(long_lat[1]), float(
long_lat[0]), newBearing, float(my_list[7]), my_list[1], 'none', 'Zenmuse_H20T')
# print(destinations)
# print(float(my_list[8])+newBearing)
# print(float(my_list[10]), float(my_list[9]))
try:
print(
Point(float(long_lat[0]), float(long_lat[1])))
image = BuildMapImage.objects.create(
path=my_list[1],
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(
float(long_lat[0]), float(long_lat[1])),
altitude=Decimal(my_list[7]),
bearing=Decimal(
(float(my_list[8])+float(my_list[14]))/2),
d_roll=None,
d_pitch=None,
d_yaw=None,
g_roll=None,
g_pitch=None,
g_yaw=None,
session_id=1
)
print('working')
# active=False
except Exception as e:
print(e)
return HttpResponse(status=status.HTTP_200_OK)
|
KIOS-Research/AIDERS
|
aidersplatform/django_api/aiders/views.py
|
views.py
|
py
| 110,716 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "threading.Event",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.all",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "permissions.IsOwnerOrReadOnly",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "consumers.ErrorMsg.set_message_and_error",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "consumers.ErrorMsg",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "consumers.ErrorMsg.set_message_and_error",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "consumers.ErrorMsg",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "pytz.timezone",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.TIME_ZONE",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 396,
"usage_type": "name"
},
{
"api_name": "models.Count",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "models.Count",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point.Point",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point.Point",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 495,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 506,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 545,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.water_collector.water_collector.water_sampler_under_water",
"line_number": 550,
"usage_type": "attribute"
},
{
"api_name": "logic.algorithms.water_collector.water_collector",
"line_number": 550,
"usage_type": "name"
},
{
"api_name": "consumers.ErrorMsg.set_message_and_error",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "consumers.ErrorMsg",
"line_number": 558,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 570,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 588,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 588,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 608,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 608,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 608,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 609,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 617,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 617,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 617,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 635,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 635,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 635,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 640,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 640,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 652,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 652,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "json2html.convert",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 691,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 691,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.all",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 693,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 693,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 704,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 704,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 704,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 707,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 747,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 747,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 747,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 750,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 750,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 750,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 753,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 765,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 765,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 765,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 773,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.water_collector.water_collector.check_sensor",
"line_number": 777,
"usage_type": "attribute"
},
{
"api_name": "logic.algorithms.water_collector.water_collector",
"line_number": 777,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "django.urls.resolve",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 808,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 808,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.get_list_or_404",
"line_number": 822,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.serialize",
"line_number": 823,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 823,
"usage_type": "name"
},
{
"api_name": "forms.JoinOperationForm",
"line_number": 841,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 847,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 852,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 852,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 855,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 855,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 855,
"usage_type": "name"
},
{
"api_name": "django.core.serializers.serialize",
"line_number": 857,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 857,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 859,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 863,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 864,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 875,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 875,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 879,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 879,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 879,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 880,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 880,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 880,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 883,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 883,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 883,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.build_map.build_map_request_handler.buildMapPublisherSingleMessage",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.build_map.build_map_request_handler",
"line_number": 892,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 896,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 896,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 896,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 901,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 901,
"usage_type": "name"
},
{
"api_name": "logic.Constants.Constants.BUILD_MAP_DIR_PREFIX",
"line_number": 902,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 902,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 906,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 906,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 906,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 909,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 909,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 909,
"usage_type": "name"
},
{
"api_name": "logic.Constants.Constants.BUILD_MAP_DIR_PREFIX",
"line_number": 910,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 910,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 914,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 914,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 914,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.build_map.build_map_request_handler.buildMapPublisherSingleMessage",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.build_map.build_map_request_handler",
"line_number": 916,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 923,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 923,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 923,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.datetime.now",
"line_number": 924,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.datetime",
"line_number": 924,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 924,
"usage_type": "name"
},
{
"api_name": "logic.Constants.Constants.CYPRUS_TIMEZONE_OBJ",
"line_number": 924,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 924,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 925,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 925,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 925,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 928,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 928,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 928,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 931,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 931,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 931,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 943,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 943,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 943,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 947,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 947,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 947,
"usage_type": "name"
},
{
"api_name": "django.forms.models.model_to_dict",
"line_number": 951,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 955,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 955,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 955,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 971,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 971,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 971,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 971,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 974,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 974,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 974,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 978,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 978,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 978,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 983,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 983,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 983,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 990,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 993,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 993,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 993,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.build_map.img_georeference.calcPoints",
"line_number": 1021,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.build_map.img_georeference",
"line_number": 1021,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 1028,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 1028,
"usage_type": "name"
},
{
"api_name": "django.core.files.storage.default_storage.save",
"line_number": 1029,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1029,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 1029,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1029,
"usage_type": "attribute"
},
{
"api_name": "decimal.Decimal",
"line_number": 1043,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 1044,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 1065,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1077,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1077,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1077,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1080,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1080,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1080,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 996,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1083,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 1083,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 1083,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1085,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1085,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1085,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 1102,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1103,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 1107,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1110,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1110,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1110,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 1129,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1130,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 1130,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1130,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1133,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 1133,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 1133,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 1137,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1139,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1139,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1139,
"usage_type": "name"
},
{
"api_name": "logic.utils.handleAlgorithmExecution",
"line_number": 1149,
"usage_type": "call"
},
{
"api_name": "logic.utils",
"line_number": 1149,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1153,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1153,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1153,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1155,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 1155,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1155,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 1158,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 1166,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 1167,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 1167,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1168,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 1168,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 1173,
"usage_type": "call"
},
{
"api_name": "user_agents.parse",
"line_number": 1182,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 1197,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 1201,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1202,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 1204,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 1204,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1205,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 1205,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 1209,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1211,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1214,
"usage_type": "name"
},
{
"api_name": "formtools.wizard.views.SessionWizardView",
"line_number": 1214,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 1219,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.none",
"line_number": 1226,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1226,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1226,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.create",
"line_number": 1229,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1229,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1229,
"usage_type": "name"
},
{
"api_name": "guardian.shortcuts.assign_perm",
"line_number": 1250,
"usage_type": "call"
},
{
"api_name": "guardian.shortcuts.assign_perm",
"line_number": 1251,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1259,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1262,
"usage_type": "name"
},
{
"api_name": "formtools.wizard.views.SessionWizardView",
"line_number": 1262,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1267,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1267,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1267,
"usage_type": "name"
},
{
"api_name": "django.forms.models.model_to_dict",
"line_number": 1270,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 1275,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1288,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1288,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1288,
"usage_type": "name"
},
{
"api_name": "django.forms.models.model_to_dict",
"line_number": 1289,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1313,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1313,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1313,
"usage_type": "name"
},
{
"api_name": "guardian.shortcuts.assign_perm",
"line_number": 1332,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1345,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1348,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 1348,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1351,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1351,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1351,
"usage_type": "name"
},
{
"api_name": "logic.utils.handleAlgorithmExecution",
"line_number": 1358,
"usage_type": "call"
},
{
"api_name": "logic.utils",
"line_number": 1358,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1360,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1363,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 1363,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1368,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1368,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1368,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1373,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1381,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1381,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1381,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 1383,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.mission.mission_request_handler.publishMissionToRos",
"line_number": 1394,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.mission.mission_request_handler",
"line_number": 1394,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1399,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1399,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1399,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1402,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 1402,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 1402,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 1407,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1419,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1419,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1419,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 1422,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1422,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1422,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 1423,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 1441,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1441,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1441,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 1444,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1449,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 1437,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 1438,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 1456,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1457,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 1452,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 1453,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 1468,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1468,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1468,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 1469,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 1472,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1472,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1472,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 1474,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1479,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 1481,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 1483,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 1484,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 1465,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 1466,
"usage_type": "name"
},
{
"api_name": "forms.NewUserForm",
"line_number": 1490,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 1493,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 1494,
"usage_type": "call"
},
{
"api_name": "forms.NewUserForm",
"line_number": 1496,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1497,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 1487,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.datetime.now",
"line_number": 1525,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.datetime",
"line_number": 1525,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 1525,
"usage_type": "name"
},
{
"api_name": "logic.Constants.Constants.CYPRUS_TIMEZONE_OBJ",
"line_number": 1525,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 1525,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1547,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 1547,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 1547,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1590,
"usage_type": "call"
},
{
"api_name": "logic.Constants.Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE",
"line_number": 1590,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 1590,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1592,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1594,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1596,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1596,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1596,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 1574,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1606,
"usage_type": "call"
},
{
"api_name": "logic.Constants.Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE",
"line_number": 1606,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 1606,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1608,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1610,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1610,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1610,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 1599,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1620,
"usage_type": "call"
},
{
"api_name": "logic.Constants.Constants.NO_ACTIVE_LIVE_STREAM_SESSION_ERROR_MESSAGE",
"line_number": 1620,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 1620,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1622,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1624,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1624,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1624,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 1613,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1632,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.object_detection.src.models.label.get_labels_all",
"line_number": 1632,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1634,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1634,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1634,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 1627,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1643,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1645,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 1646,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 1646,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1646,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 1637,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1649,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 1649,
"usage_type": "name"
},
{
"api_name": "threading.enumerate",
"line_number": 1654,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.weather_station.weather_station_ros_publisher.main",
"line_number": 1665,
"usage_type": "attribute"
},
{
"api_name": "logic.algorithms.weather_station.weather_station_ros_publisher",
"line_number": 1665,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 1667,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants.START_WEATHER_DATA_PUBLISHER_SCRIPT",
"line_number": 1667,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 1667,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.weather_station.weather_station_ros_subscriber.main",
"line_number": 1670,
"usage_type": "attribute"
},
{
"api_name": "logic.algorithms.weather_station.weather_station_ros_subscriber",
"line_number": 1670,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1676,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1676,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1676,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1679,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 1679,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 1679,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1686,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1686,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1686,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1694,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1694,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1694,
"usage_type": "name"
},
{
"api_name": "models.Operation.DoesNotExist",
"line_number": 1697,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1697,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1735,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 1735,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 1735,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1743,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1743,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1743,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.filter",
"line_number": 1746,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1746,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1746,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 1747,
"usage_type": "call"
},
{
"api_name": "models.Count",
"line_number": 1757,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 1768,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1776,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1776,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 1785,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 1786,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 1798,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 1799,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point.Point",
"line_number": 1806,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 1806,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 1816,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1816,
"usage_type": "attribute"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1816,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1816,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 1817,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1817,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1817,
"usage_type": "name"
},
{
"api_name": "shutil.rmtree",
"line_number": 1819,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1819,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1819,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 1820,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1820,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1820,
"usage_type": "name"
},
{
"api_name": "shutil.move",
"line_number": 1821,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1821,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1821,
"usage_type": "name"
},
{
"api_name": "shutil.move",
"line_number": 1823,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1823,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1823,
"usage_type": "name"
},
{
"api_name": "os.mkdir",
"line_number": 1825,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1825,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1825,
"usage_type": "name"
},
{
"api_name": "shutil.copyfile",
"line_number": 1828,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1828,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1828,
"usage_type": "name"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1829,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1829,
"usage_type": "name"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 1834,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1834,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1834,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 1836,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1836,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1836,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 1838,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1838,
"usage_type": "attribute"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 1840,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 1840,
"usage_type": "name"
},
{
"api_name": "django.http.FileResponse",
"line_number": 1842,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1844,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 1844,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1844,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1847,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1847,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.water_collector.water_collector.publish_message",
"line_number": 1853,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.water_collector.water_collector",
"line_number": 1853,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1856,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1856,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1856,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1864,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1864,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1864,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1868,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1868,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1868,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1871,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1871,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.ballistic.ballistic.publish_message",
"line_number": 1877,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.ballistic.ballistic",
"line_number": 1877,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1880,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1880,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1880,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1888,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1888,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1888,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1892,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1892,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1892,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1895,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1895,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.range_detection.range_detection.buildMapPublisherSingleMessage",
"line_number": 1902,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.range_detection.range_detection",
"line_number": 1902,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1906,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1906,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1906,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1910,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1910,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1910,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1913,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1913,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 1927,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 1927,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 1927,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1932,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1932,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1932,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1935,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 1935,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1935,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1938,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1938,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1941,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1942,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED",
"line_number": 1942,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1942,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1945,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1945,
"usage_type": "name"
},
{
"api_name": "models.Count",
"line_number": 1950,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1955,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1956,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED",
"line_number": 1956,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1956,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1959,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 1959,
"usage_type": "name"
},
{
"api_name": "models.Count",
"line_number": 1965,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1971,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 1971,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1971,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 1972,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 1973,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED",
"line_number": 1973,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 1973,
"usage_type": "name"
},
{
"api_name": "math.tan",
"line_number": 1981,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 1981,
"usage_type": "attribute"
},
{
"api_name": "math.tan",
"line_number": 1982,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 1982,
"usage_type": "attribute"
},
{
"api_name": "geopy.distance.geodesic",
"line_number": 1983,
"usage_type": "call"
},
{
"api_name": "geopy.distance.geodesic",
"line_number": 1985,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 1992,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 1992,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 1992,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 2002,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2006,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2006,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2006,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2011,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2011,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2011,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 2013,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 2013,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 2013,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 2019,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2020,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 2028,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 2030,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 2036,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2037,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2040,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2040,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2040,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 2042,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 2042,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 2042,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 2048,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2049,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 2058,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 2060,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 2065,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2066,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2072,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point.points.split",
"line_number": 2073,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point.points",
"line_number": 2073,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2073,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2082,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point.points.split",
"line_number": 2083,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point.points",
"line_number": 2083,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2083,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2096,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2096,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2096,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2104,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 2104,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 2104,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2105,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 2105,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 2105,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 2108,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 2108,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 2108,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 2114,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2115,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2123,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point.points.split",
"line_number": 2124,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point.points",
"line_number": 2124,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2124,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2133,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.point.points.split",
"line_number": 2134,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.point.points",
"line_number": 2134,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.geos.point",
"line_number": 2134,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 2145,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 2146,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 2149,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 2150,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.PointCloud",
"line_number": 2153,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 2153,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 2154,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 2154,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 2155,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 2155,
"usage_type": "attribute"
},
{
"api_name": "open3d.geometry.TetraMesh.create_from_point_cloud",
"line_number": 2159,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 2159,
"usage_type": "attribute"
},
{
"api_name": "open3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape",
"line_number": 2162,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 2162,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 2171,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2171,
"usage_type": "attribute"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2171,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2171,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 2172,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2172,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2172,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 2173,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2173,
"usage_type": "attribute"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2173,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2173,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 2174,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2174,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2174,
"usage_type": "name"
},
{
"api_name": "open3d.io.write_triangle_mesh",
"line_number": 2176,
"usage_type": "call"
},
{
"api_name": "open3d.io",
"line_number": 2176,
"usage_type": "attribute"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2177,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2177,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2184,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2184,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2184,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 2194,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 2194,
"usage_type": "call"
},
{
"api_name": "forms.FlyingReportForm",
"line_number": 2194,
"usage_type": "call"
},
{
"api_name": "forms.FlyingReportForm",
"line_number": 2203,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.datetime.strptime",
"line_number": 2213,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.datetime",
"line_number": 2213,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 2213,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.datetime.strptime",
"line_number": 2215,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.datetime",
"line_number": 2215,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 2215,
"usage_type": "name"
},
{
"api_name": "logic.algorithms.flying_report.flying_report.main",
"line_number": 2219,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.flying_report.flying_report",
"line_number": 2219,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 2226,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 2226,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 2226,
"usage_type": "name"
},
{
"api_name": "django.core.files.storage.default_storage.path",
"line_number": 2228,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 2228,
"usage_type": "name"
},
{
"api_name": "django.http.FileResponse",
"line_number": 2229,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 2238,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 2238,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2241,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2241,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2241,
"usage_type": "name"
},
{
"api_name": "models.Operation.objects.get",
"line_number": 2245,
"usage_type": "call"
},
{
"api_name": "models.Operation.objects",
"line_number": 2245,
"usage_type": "attribute"
},
{
"api_name": "models.Operation",
"line_number": 2245,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 2246,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2249,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 2249,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 2249,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 2288,
"usage_type": "call"
},
{
"api_name": "logic.Constants.Constants.ONLINE_MAP_MODE",
"line_number": 2291,
"usage_type": "attribute"
},
{
"api_name": "logic.Constants.Constants",
"line_number": 2291,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 2294,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 2298,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 2298,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 2301,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.build_map.img_georeference.calcPoints",
"line_number": 2321,
"usage_type": "call"
},
{
"api_name": "logic.algorithms.build_map.img_georeference",
"line_number": 2321,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 2341,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 2342,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 2356,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 2356,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 2356,
"usage_type": "name"
}
] |
11165153113
|
from uuid import uuid4
from demo.data_loading.data_fetching import get_countries_data
from demo.data_loading.fixes import fix_alpha2_value, fix_alpha3_value, fix_string_value
from demo.server.config import get_pyorient_client
def load_countries_and_regions(countries_df):
graph = get_pyorient_client()
country_cls = graph.registry['Country']
region_cls = graph.registry['Region']
subarea_cls = graph.registry['GeographicArea_SubArea']
area_name_and_type_to_vertex = dict()
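    # Maps (area name, 'Country' or 'Region') to its created vertex, used when creating edges below.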
# Create all countries.
for _, country_item in countries_df.iterrows():
name = fix_string_value(country_item['CLDR display name'])
uuid = str(uuid4())
alpha2 = fix_alpha2_value(country_item['ISO3166-1-Alpha-2'])
alpha3 = fix_alpha3_value(country_item['ISO3166-1-Alpha-3'])
props = {
'name': name,
'uuid': uuid,
'alpha2': alpha2,
'alpha3': alpha3,
}
vertex = graph.create_vertex(country_cls, **props)
area_name_and_type_to_vertex[(name, 'Country')] = vertex
# Create all non-country regions.
for _, country_item in countries_df.iterrows():
for region_column in ('Intermediate Region Name', 'Sub-region Name', 'Region Name'):
name = fix_string_value(country_item[region_column])
if name is None or (name, 'Region') in area_name_and_type_to_vertex:
# Don't create regions with no name, or regions that were already added.
continue
uuid = str(uuid4())
props = {
'name': name,
'uuid': uuid,
}
vertex = graph.create_vertex(region_cls, **props)
area_name_and_type_to_vertex[(name, 'Region')] = vertex
# Create all relationships between countries/regions.
created_edges = set()
for _, country_item in countries_df.iterrows():
hierarchy_order = (
('CLDR display name', 'Country'),
('Intermediate Region Name', 'Region'),
('Sub-region Name', 'Region'),
('Region Name', 'Region'),
)
regions_in_order = [
(region_name, kind)
for region_name, kind in (
(fix_string_value(country_item[column_name]), kind)
for column_name, kind in hierarchy_order
)
if region_name is not None
]
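        # regions_in_order runs from most specific to least; each entry's parent is the next entry.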
for index, (parent_region_name, parent_region_kind) in enumerate(regions_in_order):
if index == 0:
continue
child_region_name, child_region_kind = regions_in_order[index - 1]
uniqueness_key = (
parent_region_name,
parent_region_kind,
child_region_name,
child_region_kind,
)
if uniqueness_key not in created_edges:
graph.create_edge(
subarea_cls,
area_name_and_type_to_vertex[(parent_region_name, parent_region_kind)],
area_name_and_type_to_vertex[(child_region_name, child_region_kind)])
created_edges.add(uniqueness_key)
# Link all currently parent-less regions to the World region.
all_region_names = set(area_name_and_type_to_vertex.keys())
all_regions_with_parents = {
(child_region_name, child_region_kind)
for _, _, child_region_name, child_region_kind in created_edges
}
all_regions_without_parents = all_region_names - all_regions_with_parents
world_vertex = graph.create_vertex(region_cls, name='World', uuid=str(uuid4()))
for region_name, region_kind in all_regions_without_parents:
graph.create_edge(
subarea_cls,
world_vertex,
area_name_and_type_to_vertex[(region_name, region_kind)])
def orientdb_load_all():
countries_df = get_countries_data()
load_countries_and_regions(countries_df)
if __name__ == '__main__':
orientdb_load_all()
|
obi1kenobi/graphql-compiler-cross-db-example
|
demo/data_loading/orientdb_loading.py
|
orientdb_loading.py
|
py
| 4,019 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "demo.server.config.get_pyorient_client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.fixes.fix_string_value",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.fixes.fix_alpha2_value",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.fixes.fix_alpha3_value",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.fixes.fix_string_value",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.fixes.fix_string_value",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "demo.data_loading.data_fetching.get_countries_data",
"line_number": 104,
"usage_type": "call"
}
] |
27264187100
|
"""
GenT2_Rulebase.py
Created 9/1/2022
"""
from juzzyPython.generalType2zSlices.system.GenT2Engine_Intersection import GenT2Engine_Intersection
from juzzyPython.generalType2zSlices.system.GenT2Engine_Union import GenT2Engine_Union
from juzzyPython.generalType2zSlices.system.GenT2_Rule import GenT2_Rule
from juzzyPython.intervalType2.system.IT2_Rulebase import IT2_Rulebase
from juzzyPython.generalType2zSlices.system.GenT2_Antecedent import GenT2_Antecedent
from typing import List, OrderedDict
from juzzyPython.testing.timeRecorder import timeDecorator
class GenT2_Rulebase():
"""
Class GenT2_Rulebase
Keeps track of rules and generates results
Parameters:
None
Functions:
addRule
addRules
getRules
getFuzzyLogicType
get_GenT2zEngine_Intersection
get_GenT2zEngineUnion
getOverallOutput
evaluateGetCentroid
evaluate
getIT2Rulebases
getRule
changeRule
removeRule
getNumberOfRules
containsRule
getRulesWithAntecedents
getImplicationMethod
setImplicationMethod
toString
"""
def __init__(self) -> None:
self.rules = []
self.outputs = []
self.DEBUG = False
self.CENTEROFSETS = 0
self.CENTROID = 1
self.implicationMethod = 1
self.PRODUCT = 0
self.MINIMUM = 1
self.gzEU = GenT2Engine_Union()
self.gzEI = GenT2Engine_Intersection()
def addRule(self,r: GenT2_Rule) -> None:
"""Add a new rule to the rule set"""
self.rules.append(r)
it = r.getConsequents()
for i in it:
o = i.getOutput()
if not o in self.outputs:
self.outputs.append(o)
def addRules(self,r: List[GenT2_Rule]) -> None:
"""Add multiple new rules to the rule set"""
        for rule in r:
            self.addRule(rule)
def getRules(self) -> List[GenT2_Rule]:
"""Return all the rules in the set"""
return self.rules
def getRule(self,ruleNum: int) -> GenT2_Rule:
"""Return a specific rule"""
return self.rules[ruleNum]
def getNumberOfRules(self) -> int:
"""Get the number of rules in the set"""
return len(self.rules)
def getFuzzyLogicType(self) -> int:
"""Returns the type of fuzzy logic that is employed.
return 0: type-1, 1: interval type-2, 2: zSlices based general type-2"""
return 2
def containsRule(self,rule: GenT2_Rule) -> bool:
"""Check if a rule in the ruleset"""
return rule in self.rules
def getGenT2zEngineIntersection(self) -> GenT2Engine_Intersection:
"""Return the intersection engine"""
return self.gzEI
def getGenT2zEngineUnion(self) -> GenT2Engine_Union:
"""Return the union engine"""
return self.gzEU
def removeRule(self,ruleNumber: int) -> None:
"""Remove a rule based on its index"""
del self.rules[ruleNumber]
def getImplicationMethod(self) -> str:
"""Return if the implication is product or minimum"""
if self.implicationMethod == self.PRODUCT:
return "product"
else:
return "minimum"
def setImplicationMethod(self,implicationMethod: int) -> None:
"""Sets the implication method, where by implication, we mean the implementation
of the AND logical connective between parts of the antecedent.
The desired implication method is applied for all rules."""
if implicationMethod == self.PRODUCT:
self.implicationMethod = self.PRODUCT
elif implicationMethod == self.MINIMUM:
self.implicationMethod = self.MINIMUM
else:
raise Exception("Only product (0) and minimum (1) implication is currently supported.")
def toString(self) -> str:
"""Convert the class to string"""
s = "General Type-2 Fuzzy Logic System with "+str(self.getNumberOfRules())+" rules:\n"
for i in range(self.getNumberOfRules()):
s += str(self.rules[i].toString())+"\n"
return s
def getOverallOutput(self) -> dict:
"""Return the overall output of the rules"""
returnValue = OrderedDict()
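        # Union each rule's raw output per output variable to form the overall fuzzy output.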
for r in range(len(self.rules)):
temp = self.rules[r].getRawOutput()
for o in self.outputs:
if r == 0:
returnValue[o] = temp[o]
else:
returnValue[o] = self.gzEU.getUnion(returnValue.get(o),temp.get(o))
return returnValue
def evaluateGetCentroid(self,typeReductionType: int) -> dict:
"""Returns the output of the FLS after type-reduction, i.e. the centroid.
param: typeReductionType
        return: An OrderedDict where Output is used as key and the value is a pair of lists:
        element [0] holds the centroids (one Tuple per zLevel) and element [1] holds the associated zValues for those centroids. If no rule fired for the given input(s),
        then None is returned instead."""
returnValue = OrderedDict()
rbsIT2 = self.getIT2Rulebases()
zValues = self.rules[0].getAntecedents()[0].getSet().getZValues()
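        # Evaluate each zSlice's IT2 rulebase and pair its centroids with that slice's zValue.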
for i in range(len(rbsIT2)):
temp = rbsIT2[i].evaluateGetCentroid(typeReductionType)
for o in temp.keys():
if i == 0:
returnValue[o] = [[],[]]
returnValue[o][0].append(temp[o][0])
returnValue[o][1].append(zValues[i])
return returnValue
def evaluate(self,typeReductionType: int) -> dict:
"""The current evaluate function is functional but inefficient. It creates an IT2
version of all the rules in the rulebase and computes each IT2 rule separately...
        param typeReductionType: 0: Center Of Sets, 1: Centroid"""
returnValue = OrderedDict()
rbsIT2 = self.getIT2Rulebases()
rawOutputValues = []
for i in range(len(rbsIT2)):
rawOutputValues.append(rbsIT2[i].evaluate(typeReductionType))
zValues = self.rules[0].getAntecedents()[0].getSet().getZValues()
for o in self.outputs:
i=0
numerator = 0.0
denominator = 0.0
for outputValue in rawOutputValues:
numerator += outputValue[o] * zValues[i]
denominator += zValues[i]
                i += 1
returnValue[o] = numerator/denominator
return returnValue
def getIT2Rulebases(self) -> List[IT2_Rulebase]:
"""Returns the whole zSlices based rulebase as a series of interval type-2
rule bases (one per zLevel) which can then be computed in parallel.
param typeReductionMethod: The type-reduction method to be used at the IT2 level
0: Center Of Sets, 1: Centroid.
param discretizationLevelXAxis: The number of discretizations to be used at the IT2 level."""
rbs = [0] * self.rules[0].getAntecedents()[0].getSet().getNumberOfSlices()
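        # Build one IT2 rulebase per zSlice, each holding the IT2 projection of every rule.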
for i in range(len(rbs)):
rbs[i] = IT2_Rulebase()
for currentRule in range(self.getNumberOfRules()):
rbs[i].addRule(self.rules[currentRule].getRuleasIT2Rules()[i])
rbs[i].setImplicationMethod(self.implicationMethod)
return rbs
def getRulesWithAntecedents(self,antecedents: List[GenT2_Antecedent]) -> List[GenT2_Rule]:
""" Returns all rules with a matching (i.e. equal) set of antecedents."""
matches = []
for i in range(len(self.rules)):
if self.rules[i].getAntecedents()==antecedents:
matches.append(self.rules[i])
return matches
|
LUCIDresearch/JuzzyPython
|
juzzyPython/generalType2zSlices/system/GenT2_Rulebase.py
|
GenT2_Rulebase.py
|
py
| 7,915 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2Engine_Union.GenT2Engine_Union",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2Engine_Intersection.GenT2Engine_Intersection",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2Engine_Intersection.GenT2Engine_Intersection",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2Engine_Union.GenT2Engine_Union",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "typing.OrderedDict",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "typing.OrderedDict",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "typing.OrderedDict",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "juzzyPython.intervalType2.system.IT2_Rulebase.IT2_Rulebase",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "juzzyPython.intervalType2.system.IT2_Rulebase.IT2_Rulebase",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Antecedent.GenT2_Antecedent",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.system.GenT2_Rule.GenT2_Rule",
"line_number": 199,
"usage_type": "name"
}
] |
40319534507
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import \
Session
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls import GeneralModule
class General(GeneralModule):
CMDS = {
'set': 'set',
'search': 'get',
}
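    # Path of the forward-proxy settings inside the OPNsense config tree / API requests.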
API_KEY_PATH = 'proxy.forward'
API_KEY_PATH_REQ = API_KEY_PATH
API_MOD = 'proxy'
API_CONT = 'settings'
API_CONT_REL = 'service'
API_CMD_REL = 'reconfigure'
FIELDS_CHANGE = [
'interfaces', 'port', 'port_ssl', 'transparent', 'ssl_inspection',
'ssl_inspection_sni_only', 'ssl_ca', 'ssl_exclude', 'ssl_cache_mb',
'ssl_workers', 'allow_interface_subnets', 'snmp', 'port_snmp',
'snmp_password', 'interfaces_ftp', 'port_ftp', 'transparent_ftp',
]
FIELDS_ALL = FIELDS_CHANGE
FIELDS_TRANSLATE = {
'port_ssl': 'sslbumpport',
'transparent': 'transparentMode',
'ssl_inspection': 'sslbump',
'ssl_inspection_sni_only': 'sslurlonly',
'ssl_ca': 'sslcertificate',
'ssl_exclude': 'sslnobumpsites',
'ssl_cache_mb': 'ssl_crtd_storage_max_size',
'ssl_workers': 'sslcrtd_children',
'allow_interface_subnets': 'addACLforInterfaceSubnets',
'snmp': 'snmp_enable',
'port_snmp': 'snmp_port',
'interfaces_ftp': 'ftpInterfaces',
'port_ftp': 'ftpPort',
'transparent_ftp': 'ftpTransparentMode',
}
FIELDS_TYPING = {
'bool': [
'transparent_ftp', 'snmp', 'allow_interface_subnets', 'ssl_inspection_sni_only',
'ssl_inspection', 'transparent',
],
'list': ['interfaces', 'ssl_exclude', 'interfaces_ftp'],
'int': ['port', 'port_ssl', 'ssl_cache_mb', 'ssl_workers', 'port_snmp'],
'select': ['ssl_ca'],
}
FIELDS_IGNORE = ['acl', 'icap', 'authentication']
INT_VALIDATIONS = {
'ssl_workers': {'min': 1, 'max': 32},
'ssl_cache_mb': {'min': 1, 'max': 65535},
'port': {'min': 1, 'max': 65535},
'port_ssl': {'min': 1, 'max': 65535},
'port_snmp': {'min': 1, 'max': 65535},
}
FIELDS_DIFF_EXCLUDE = ['snmp_password']
def __init__(self, module: AnsibleModule, result: dict, session: Session = None):
GeneralModule.__init__(self=self, m=module, r=result, s=session)
|
ansibleguy/collection_opnsense
|
plugins/module_utils/main/webproxy_forward.py
|
webproxy_forward.py
|
py
| 2,388 |
python
|
en
|
code
| 158 |
github-code
|
6
|
[
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ansible.module_utils.basic.AnsibleModule",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api.Session",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule.__init__",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule",
"line_number": 62,
"usage_type": "name"
}
] |
71913785148
|
import qrcode as qr
from PIL import Image
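# version=1 gives a 21x21 module grid; ERROR_CORRECT_H tolerates roughly 30% damage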
q = qr.QRCode(version=1,
error_correction=qr.constants.ERROR_CORRECT_H,
box_size=10,
border=4,)
q.add_data("https://youtu.be/NaQ_4ZvCbOE")
q.make(fit=True)
img = q.make_image(fill_color='darkblue', back_color='steelblue')
img.save("x.png")
|
Xander1540/Python-Projects
|
QRcode/QRcode.py
|
QRcode.py
|
py
| 316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "qrcode.QRCode",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "qrcode.constants",
"line_number": 4,
"usage_type": "attribute"
}
] |
73730161788
|
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from dae.dae import DAE
from beta_vae.beta_vae import BetaVAE
from history import History
# hyperparameters
num_epochs = 100
batch_size = 128
lr = 1e-4
beta = 4
save_iter = 20
shape = (28, 28)
n_obs = shape[0] * shape[1]
# create DAE and ß-VAE and their training history
dae = DAE(n_obs, num_epochs, batch_size, 1e-3, save_iter, shape)
beta_vae = BetaVAE(n_obs, num_epochs, batch_size, 1e-4, beta, save_iter, shape)
history = History()
# fill autoencoder training history with examples
print('Filling history...', end='', flush=True)
transformation = transforms.Compose([
transforms.ColorJitter(),
transforms.ToTensor()
])
dataset = MNIST('data', transform=transformation)
dataloader = DataLoader(dataset, batch_size=1, shuffle=True)
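# flatten each 28x28 image into a 784-dim list before storing it in the history buffer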
for data in dataloader:
img, _ = data
img = img.view(img.size(0), -1).numpy().tolist()
history.store(img)
print('DONE')
# train DAE
dae.train(history)
# train ß-VAE
beta_vae.train(history, dae)
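# Note (an assumption based on the DARLA setup this script mirrors): the DAE is
# trained first so the ß-VAE can score reconstructions in the DAE's feature
# space rather than in raw pixels, hence the `dae` argument above.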
|
BCHoagland/DARLA
|
train.py
|
train.py
|
py
| 1,115 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "dae.dae",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dae.dae.DAE",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "beta_vae.beta_vae",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "beta_vae.beta_vae.BetaVAE",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "history.History",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ColorJitter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "history.store",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "dae.dae.train",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "dae.dae",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "beta_vae.beta_vae.train",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dae.dae",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "beta_vae.beta_vae",
"line_number": 47,
"usage_type": "name"
}
] |
23012946135
|
import pandas as pd
import numpy as np
import geopandas as gpd
from helper_functions import add_subset_address_cols, interpolate_polygon
from data_constants import default_crs, make_data_dict
from name_parsing import combine_names
from address_parsing import clean_parse_address
from helper_functions import make_panel
from pathos.multiprocessing import ProcessingPool as Pool
import re
import fiona
import warnings
warnings.filterwarnings("ignore", 'This pattern has match groups')
# function for reading in corrupted gdb files. really only relevant for LA CAMS data
def readShp_nrow(path, numRows):
fiona_obj = fiona.open(str(path))
toReturn = gpd.GeoDataFrame.from_features(fiona_obj[0:numRows])
toReturn.crs = fiona_obj.crs
return (toReturn)
# classify & clean name columns + clean & parse primary and mailing addresses
# function that runs code in parallel
def parallelize_dataframe(df:pd.DataFrame, func, n_cores=4) -> pd.DataFrame:
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
# have to include this to prevent leakage and allow multiple parallel function calls
pool.terminate()
pool.restart()
return df
# wrapper function to run each city in parallel
def clean_parse_parallel(df:pd.DataFrame) -> pd.DataFrame:
df = clean_parse_address(
dataframe=df, address_col='address_fa',st_name="address_sn", st_sfx="address_ss",
st_d="address_sd", unit='address_u', st_num='address_n1',
country='address_country', state='address_state', st_num2 ='address_n2',city='address_city',
zipcode='address_zip', prefix2='parsed_', prefix1='cleaned_'
)
return df
# ADDRESS CLEANING FUNCTIONS #
# takes an address df (geopandas or pandas), standardizes and cleans it, and returns a standardized pandas dataframe
# these functions get address dataframes to be in standardized formats (renamed columns, added variables, etc)
# such that the dataframe can be passed to clean_parse_parallel and exported
# see address cols in data constants for full list of necessary columns needed for clean_parse_parallel
# I'll note if there is anything special with the function, but otherwise assume that it follows a standard flow of
# 1. rename columns -> add columns -> subset to only needed columns -> clean_parse_parallel -> return
# chicago cleaning functions:
# chicago address files come in two separate files that together represent a full set of addresses in cook county
# clean chi_add_points cleans a points file that represents centroid points for cook county parcel polygons
def clean_chi_add_points(df):
chicago_rename_dict = {
'ADDRNOCOM': 'address_n1',
'STNAMEPRD': 'address_sd',
'STNAME': 'address_sn',
'STNAMEPOT': 'address_ss',
'PLACENAME': 'address_city',
'ZIP5': 'address_zip',
'CMPADDABRV': 'address_fa',
'PIN': 'parcelID',
'XPOSITION': 'long',
'YPOSITION': 'lat'
}
df.rename(columns=chicago_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
# basically the same as address points, but these are for parcel polygons (lat long are centroid points, so it is
# basically equivalent, these just have some addresses not in the other df and vice versa
def clean_chi_add_parcels(df):
chicago_rename_dict = {
'property_address':'address_fa',
'property_city': 'address_city',
'property_zip': 'address_zip',
'pin': 'parcelID',
'latitude': 'lat',
'longitude': 'long'
}
df.rename(columns=chicago_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=4)
return df
def concat_chi_add(df1, df2):
df1 = df1.append(df2).drop_duplicates(subset = [
'parcelID',
"parsed_addr_n1",
"parsed_addr_sn",
"parsed_addr_ss",
"parsed_city"
])
return df1
# saint louis is a little strange because they provide parcel polygons for entire streets
# eg main st 100-900. This is fine for small streets as its not problematic to take centroid polygons, but
# it becomes an issue for larger streets. For larger streets I take a best guess on which way the street runs and
# linearly interpolate lat long between the bottom and top range of the address span
# so if main st 100-900 runs nw that means it has its smallest numbers in the south east and increases going north west
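# Hypothetical illustration (names and coordinates are made up, not from this
# repo): linearly interpolating one address number within a street range whose
# bounding box runs "NE" (numbers grow towards max lat/long).
def _interpolation_example(n, n_lo=100, n_hi=900, lo=(38.60, -90.25), hi=(38.62, -90.20)):
    frac = (n - n_lo) / (n_hi - n_lo)  # position of n within the address range
    return (lo[0] + frac * (hi[0] - lo[0]), lo[1] + frac * (hi[1] - lo[1]))
# _interpolation_example(500) -> the halfway point of the bounding box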
def clean_stl_add(df):
df = df.rename(
columns = {
"STREETNAME": "address_sn", "STREETTYPE": "address_ss", "PREDIR": "address_sd", "ZIP_CODE": "address_zip"
}
)
df['index'] = np.arange(df.shape[0])
df = df.to_crs(default_crs)
df.crs = default_crs
bounds = df.bounds
df['address_city'] = 'saint louis'
df['latitude_min'] = bounds["miny"]
df['latitude_max'] = bounds["maxy"]
df['longitude_min'] = bounds["minx"]
df['longitude_max'] = bounds["maxx"]
df['direction'] = np.where(
((df['FROMLEFT'] < df['TOLEFT']) & (df['FROMRIGHT'] < df['TORIGHT'])),
"NE",
np.where(
((df['FROMLEFT'] < df['TOLEFT']) & (df['FROMRIGHT'] > df['TORIGHT'])),
"NW",
np.where(
((df['FROMLEFT'] > df['TOLEFT']) & (df['FROMRIGHT'] < df['TORIGHT'])),
"SE",
np.where(
((df['FROMLEFT'] > df['TOLEFT']) & (df['FROMRIGHT'] > df['TORIGHT'])),
"SW",
"SW"
)
)
)
)
df_r = df[[col for col in df.columns if not bool(re.search("LEFT", col))]]
df_r['address_n1'] = np.where(
df_r['FROMRIGHT'] > df_r['TORIGHT'],
df_r['TORIGHT'],
df_r['FROMRIGHT']
)
df_r['address_n2'] = np.where(
df_r['TORIGHT'] > df_r['FROMRIGHT'],
df_r['TORIGHT'],
df_r['FROMRIGHT']
)
df_l = df[[col for col in df.columns if not bool(re.search("RIGHT", col))]]
df_l['address_n1'] = np.where(
df_l['FROMLEFT'] > df_l['TOLEFT'],
df_l['TOLEFT'],
df_l['FROMLEFT']
)
df_l['address_n2'] = np.where(
df_l['TOLEFT'] > df_l['FROMLEFT'],
df_l['TOLEFT'],
df_l['FROMLEFT']
)
df = pd.concat([df_r, df_l])
    df = df[~((df['address_n1'] <= 0) & (df['address_n2'] <= 0))]  # drop ranges that are non-positive at both ends
df = make_panel(df,start_year="address_n1", end_year="address_n2", current_year=df['address_n2'],
evens_and_odds=True ).rename(columns = {'year': 'address_n1'})
# interpolate lat long
df = interpolate_polygon(df, "index", "direction")
df['lat'] = df['lat_interpolated']
df['long'] = df["long_interpolated"]
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_la_add(df):
la_rename_dict = {
'AIN': 'parcelID',
'UnitName': 'address_u',
'Number': 'address_n1',
'PostType': 'address_ss',
'PreDirAbbr': 'address_sd',
'ZipCode': 'address_zip',
'LegalComm': 'address_city',
}
df.rename(columns=la_rename_dict, inplace=True)
combine_names(df, name_cols=['PreType', 'StArticle', 'StreetName'], newCol="address_sn")
df = df.to_crs(default_crs)
df.crs = default_crs
df['long'] = df.geometry.centroid.x
df['lat'] = df.geometry.centroid.y
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_sd_add(df):
sd_rename_dict = {
'addrunit': 'address_u',
'addrnmbr': 'address_n1',
'addrpdir':'address_sd',
'addrname': 'address_sn',
'addrsfx': 'address_ss',
'addrzip': 'address_zip',
'community': 'address_city',
'PIN': 'parcelID',
}
df.rename(columns=sd_rename_dict, inplace=True)
df = df.to_crs(default_crs)
df.crs = default_crs
df['long'] = df.geometry.centroid.x
df['lat'] = df.geometry.centroid.y
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_sf_add(df):
sf_rename_dict = {
"Parcel Number": 'parcelID',
'Unit Number': 'address_u',
'Address Number': 'address_n1',
'Street Name': 'address_sn',
'Street Type': 'address_ss',
'ZIP Code': 'address_zip',
'Address': 'address_fa',
#'PIN': 'parcelID',
'Longitude': 'long',
'Latitude': 'lat'
}
df.rename(columns=sf_rename_dict, inplace=True)
df['address_city'] = "San Francisco"
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_seattle_add(df):
seattle_rename_dict = {
'PIN': 'parcelID',
'ADDR_NUM': 'address_n1',
'ADDR_SN': 'address_sn',
'ADDR_ST': 'address_ss',
'ADDR_SD': 'address_sd',
'ZIP5': 'address_zip',
'CTYNAME': 'address_city',
'ADDR_FULL': 'address_fa',
'LON': 'long',
'LAT': 'lat'
}
df.rename(columns=seattle_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_orlando_add(df):
orlando_rename_dict = {
'OFFICIAL_P': 'parcelID',
"COMPLETE_A": 'address_fa',
"ADDRESS__1": 'address_n1',
"ADDRESS__2": "address_n2",
"BASENAME": "address_sn",
"POST_TYPE":"address_ss",
"POST_DIREC": "address_sd",
"MUNICIPAL_": 'address_city',
"ZIPCODE": "address_zip",
"LATITUDE": "lat",
"LONGITUDE": "long",
}
df.rename(columns=orlando_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_baton_rouge_add(df):
baton_rouge_rename_dict = {
'ADDRNOCOM': 'address_n1',
'ASTREET PREFIX DIRECTION': 'address_sd',
'STREET NAME': 'address_sn',
'STREET SUFFIX TYPE': 'address_ss',
'CITY': 'address_city',
'ZIP': 'address_zip',
'FULL ADDRESS': 'address_fa'
}
df.rename(columns=baton_rouge_rename_dict, inplace=True)
    lat_long = df['GEOLOCATION'].str.extract('(-?[0-9.]+),\s*(-?[0-9.]+)')  # allow negative coordinates and optional whitespace
df['lat'] = lat_long.iloc[:,0]
df['long'] = lat_long.iloc[:,1]
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=4)
return df
def merge_sac_parcel_id(sac_add: pd.DataFrame, xwalk: pd.DataFrame):
return pd.merge(
sac_add,
xwalk[xwalk['Parcel_Number'].notna()][["Address_ID", "Parcel_Number"]].drop_duplicates(),
left_on = "Address_ID", right_on = "Address_ID", how = "left"
)
def clean_sac_add(df):
sac_rename_dict = {
'APN': 'parcelID',
"Address_Number": 'address_n1',
"Street_Name": "address_sn",
"Street_Suffix":"address_ss",
"Pre_Directiona;": "address_sd",
"Postal_City": 'address_city',
"Zip_Code": "address_zip",
"Latitude_Y": "lat",
"Longitude_X": "long",
}
df.rename(columns=sac_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
# used to reclean data in the event that you don't want to read in a shapefile
# mostly used because it's faster to read in a csv than a shp
def clean_int_addresses(df):
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
if __name__ == "__main__":
print("hello")
data_dict = make_data_dict(use_seagate=False)
# stl_add = gpd.read_file(data_dict['raw']['stl']['parcel'] + 'streets/tgr_str_cl.shp')
# stl_add = clean_stl_add(stl_add)
# stl_add.to_csv(data_dict['intermediate']['stl']['parcel'] + 'addresses.csv', index=False)
# baton_rouge_add = pd.read_csv(
# data_dict['raw']['baton_rouge']['parcel'] + 'addresses_Property_Information_ebrp.csv')
# baton_rouge_add = clean_baton_rouge_add(baton_rouge_add)
# baton_rouge_add.to_csv(data_dict['intermediate']['baton_rouge']['parcel'] + 'addresses.csv', index=False)
# chicago_add1 = pd.read_csv(data_dict['raw']['chicago']['parcel'] + 'Cook_County_Assessor_s_Property_Locations.csv')
# chicago_add2 = pd.read_csv(data_dict['raw']['chicago']['parcel'] + 'Address_Points_cook_county.csv')
# orlando_add = gpd.read_file(data_dict['raw']['orlando']['parcel'] + "Address Points/ADDRESS_POINT.shp")
# clean_orlando_add(orlando_add).to_csv(data_dict['intermediate']['orlando']['parcel'] + 'addresses.csv', index=False)
# la_add = gpd.read_file("/Users/JoeFish/Desktop/la_addresspoints.gdb", nrows = 100)
# la_add = pd.read_csv(data_dict['intermediate']['la']['parcel'] + 'addresses.csv')
# file is corrupted so we have to read it in this way...
# print(la_add.head())
#sd_add = gpd.read_file(data_dict['raw']['sd']['parcel'] + 'addrapn_datasd_san_diego/addrapn_datasd.shp')
# sf_add = pd.read_csv(
# data_dict['raw']['sf']['parcel'] + 'Addresses_with_Units_-_Enterprise_Addressing_System_san_francisco.csv')
# seattle_add = gpd.read_file(data_dict['raw']['seattle']['parcel'] +
# 'Addresses_in_King_County___address_point/Addresses_in_King_County___address_point.shp')
#
# # clean_baton_rouge_add(baton_rouge_add).to_csv(data_dict['intermediate']['baton_rouge']['parcel'] + 'addresses.csv', index=False)
# clean_chi_add2(chicago_add1).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_parcels.csv', index=False)
# clean_chi_add1(chicago_add2).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_points.csv', index=False)
# clean_int_addresses(la_add).to_csv(data_dict['intermediate']['la']['parcel'] + 'addresses_temp.csv', index=False)
# clean_sf_add(sf_add).to_csv(data_dict['intermediate']['sf']['parcel'] + 'addresses.csv', index=False)
# #clean_sd_add(sd_add).to_csv(data_dict['intermediate']['sd']['parcel'] + 'addresses.csv', index=False)
# clean_seattle_add(seattle_add).to_csv(data_dict['intermediate']['seattle']['parcel'] + 'addresses.csv', index=False)
# chi1 = pd.read_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_parcels.csv', dtype={"parsed_addr_n1": str})
# chi2 = pd.read_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_points.csv', dtype={"parsed_addr_n1": str})
# concat_chi_add(chi1,chi2).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_concat.csv', index=False)
sac_add = pd.read_csv(data_dict['raw']['sac']['parcel'] + 'Address.csv')
sac_xwalk = pd.read_csv(data_dict['raw']['sac']['parcel'] + 'Address_parcel_xwalk.csv')
sac_add = merge_sac_parcel_id(sac_add=sac_add, xwalk=sac_xwalk)
clean_sac_add(sac_add).to_csv(data_dict['intermediate']['sac']['parcel'] + 'addresses_concat.csv', index=False)
pass
|
jfish-fishj/boring_cities
|
python_modules/clean_address_data.py
|
clean_address_data.py
|
py
| 15,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fiona.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame.from_features",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.array_split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pathos.multiprocessing.ProcessingPool",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "address_parsing.clean_parse_address",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "data_constants.default_crs",
"line_number": 123,
"usage_type": "argument"
},
{
"api_name": "data_constants.default_crs",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "helper_functions.make_panel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "helper_functions.interpolate_polygon",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "name_parsing.combine_names",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "data_constants.default_crs",
"line_number": 198,
"usage_type": "argument"
},
{
"api_name": "data_constants.default_crs",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "data_constants.default_crs",
"line_number": 219,
"usage_type": "argument"
},
{
"api_name": "data_constants.default_crs",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "pandas.merge",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "helper_functions.add_subset_address_cols",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "data_constants.make_data_dict",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 380,
"usage_type": "call"
}
] |
47036004516
|
import time
from sqlalchemy import Column, Integer, String, Float, Boolean, ForeignKey
import sqlalchemy.types as types
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import func
from sqlalchemy import or_, and_, desc
from marshmallow import Schema, fields
from database import Base
class KycRequestSchema(Schema):
date = fields.Float()
token = fields.String()
greenid_verification_id = fields.String()
status = fields.String()
class KycRequest(Base):
__tablename__ = 'kyc_requests'
id = Column(Integer, primary_key=True)
date = Column(Float, nullable=False, unique=False)
token = Column(String, nullable=False, unique=True)
greenid_verification_id = Column(String, nullable=False, unique=True)
    status = Column(String)
def __init__(self, token, greenid_verification_id):
self.date = time.time()
self.token = token
self.greenid_verification_id = greenid_verification_id
self.status = "created"
@classmethod
def count(cls, session):
return session.query(cls).count()
@classmethod
def from_token(cls, session, token):
return session.query(cls).filter(cls.token == token).first()
def __repr__(self):
return '<KycRequest %r>' % (self.token)
def to_json(self):
schema = KycRequestSchema()
return schema.dump(self).data

|
djpnewton/zap-merchant
|
models.py
|
models.py
|
py
| 1,387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "marshmallow.Schema",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Float",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.String",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.String",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.String",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "database.Base",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
}
] |
35406045180
|
import json
import os
from elasticsearch import Elasticsearch, helpers, exceptions
client = Elasticsearch(os.getenv("ELASTICSEARCH_URL"))
f = open("dump", "r")
def main():
while True:
line = f.readline()
if len(line) == 0:
break
data = json.loads(line)
        yield {
            '_op_type': 'index',
            '_index': 'data',
            '_id': data["id"],
            '_source': data,  # the document body; 'doc' is only valid for 'update' actions
        }
helpers.bulk(client, main(), stats_only=True, chunk_size=2000)
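# Note (illustrative): helpers.bulk consumes the generator lazily, so the dump
# file is streamed to Elasticsearch in chunks of 2000 actions rather than being
# loaded into memory all at once.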
|
polianax/regex
|
upload.py
|
upload.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers.bulk",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers",
"line_number": 24,
"usage_type": "name"
}
] |
11016530679
|
import os
import discord
import requests
import asyncio
from dotenv import load_dotenv
from discord.utils import get
from discord.ext import commands
compteur = 301
nbConnected = 0
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
SERVER_IP = os.getenv('SERVER_IP')
SERVER_PORT = os.getenv('SERVER_PORT')
CHANNEL_ID = int(os.getenv('CHANNEL_ID'))
VOCAL_ID = int(os.getenv('VOCAL_ID'))
bot = commands.Bot(command_prefix="!")
# Background task
async def background_task():
global compteur
await bot.wait_until_ready()
while not bot.is_closed():
await call_api()
await asyncio.sleep(30)
compteur += 30
async def call_api():
global nbConnected
global compteur
for guild in bot.guilds:
if (guild.id == CHANNEL_ID):
channel = discord.utils.get(guild.channels, id=VOCAL_ID)
response = requests.get('https://minecraft-api.com/api/ping/online/' + SERVER_IP + '/' + str(SERVER_PORT))
nbConnected2 = response.content.decode("utf-8")
if nbConnected != nbConnected2 and compteur > 300:
nbConnected = nbConnected2
message = 'Il y a ' + str(nbConnected) + (' connectés' if int(nbConnected) > 1 else ' connecté')
compteur = 0
await channel.edit(name=message)
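# Note (an assumption about the intent): Discord heavily rate-limits channel
# renames (roughly two edits per ten minutes), which is why `compteur` gates
# channel.edit() to at most one rename every 300 seconds.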
bot.loop.create_task(background_task())
# Start bot
bot.run(TOKEN)
|
AudricCh/minecraft-discord-bot
|
bot/main.py
|
main.py
|
py
| 1,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "discord.utils.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
}
] |
10609649346
|
from createProtocol import ARP, EthernetII
from parseProtocol import Parser
from optparse import OptionParser
from helper import subnet_creator, get_mac_address, get_ip_address
from rich.progress import track
from time import sleep
import socket
import netifaces
import threading
def get_user_parameters():
parse_options = OptionParser()
parse_options.add_option("-n", "--network", dest="sub_network", help="Enter Network Address \n[+] Example : 192.168.1.0/24")
parse_options.add_option("-i", "--interface", dest="interface", help="Enter Your Interface")
options, _ = parse_options.parse_args()
interfaces = netifaces.interfaces()
    if not options.interface and not options.sub_network:
        parse_options.error("Please enter parameters. You can use '--help' for parameters.")
    if options.interface not in interfaces:
        parse_options.error("There is no such interface.")
    if not options.sub_network:
        parse_options.error("Enter network address.")
return options
def send_packet(interface, ip):
ethernet = EthernetII(src_mac=get_mac_address(interface))
arp = ARP(dst_mac="00:00:00:00:00:00", src_mac=get_mac_address(interface), src_ip=get_ip_address(interface))
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
sock.bind((interface, 0x0806))
arp._dst_ip = ip
packet = ethernet() + arp()
sock.send(packet)
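# Note (illustrative): 0x0806 is the EtherType for ARP, so the raw AF_PACKET
# sockets above and below carry only ARP frames on the chosen interface.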
def receive_packet(interface):
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
sock.bind((interface, 0x0806))
parser = Parser()
while True:
data, _ = sock.recvfrom(65535)
_, _, _, otherData = parser.ethernetFrame(data)
opcode, dst_mac, dst_ip, src_mac, src_ip = parser.arp_frame(otherData)
if opcode == 2:
parser.print_frame(dst_mac=dst_mac, dst_ip=dst_ip)
def main():
user_params = get_user_parameters()
user_network = user_params.sub_network
user_interface = user_params.interface
ip_list = subnet_creator(user_network)
receive_thread = threading.Thread(target=receive_packet, args=(user_interface,), daemon=True)
receive_thread.start()
sleep(1.5)
for ip in track(ip_list, "Sending Packet => "):
send_packet(user_interface,ip)
if __name__ == "__main__":
main()
|
oguzhan-kurt/Network-Scanner
|
main.py
|
main.py
|
py
| 2,295 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "optparse.OptionParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "netifaces.interfaces",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "createProtocol.EthernetII",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "helper.get_mac_address",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "createProtocol.ARP",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "helper.get_mac_address",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "helper.get_ip_address",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "socket.AF_PACKET",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_RAW",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "socket.htons",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "socket.AF_PACKET",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_RAW",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "socket.htons",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "parseProtocol.Parser",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "helper.subnet_creator",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "rich.progress.track",
"line_number": 68,
"usage_type": "call"
}
] |
27055803559
|
"""empty message
Revision ID: 810e0afb57ea
Revises: 22771e69d10c
Create Date: 2022-01-19 19:59:08.027108
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "810e0afb57ea"
down_revision = "22771e69d10c"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"techstack",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("technology", sa.String(length=50), nullable=False),
sa.Column("label", sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("technology"),
)
op.create_table(
"team_techstack",
sa.Column("team_id", sa.Integer(), nullable=True),
sa.Column("techstack_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["team_id"],
["team.id"],
),
sa.ForeignKeyConstraint(
["techstack_id"],
["techstack.id"],
),
)
op.create_table(
"user_skill",
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("techstack_id", sa.Integer(), nullable=False),
sa.Column("skill_level", sa.Integer(), nullable=True),
sa.Column("is_learning_goal", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["techstack_id"],
["techstack.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("user_id", "techstack_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("user_skill")
op.drop_table("team_techstack")
op.drop_table("techstack")
# ### end Alembic commands ###
|
CodeForPoznan/codeforpoznan.pl_v3
|
backend/migrations/versions/810e0afb57ea_.py
|
810e0afb57ea_.py
|
py
| 1,891 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.UniqueConstraint",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_table",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_table",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 64,
"usage_type": "name"
}
] |
26964335493
|
from setuptools import setup
from hostinfo.version import __version__ as VERSION
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
PACKAGE_NAME = 'pimjpeg'
BuildCommand.pkg = PACKAGE_NAME
# BuildCommand.py2 = False
# BuildCommand.py3 = False
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
README = open('README.rst').read()
GITHUB = "https://github.com/walchko/{}".format(PACKAGE_NAME)
INSTALL_REQ = open("requirements.txt").readlines()
TEST_REQ = ['nose']
CMDS = {'publish': PublishCommand, 'make': BuildCommand}
setup(
name=PACKAGE_NAME,
version=VERSION,
author="Kevin J. Walchko",
keywords=['package', 'keywords'],
author_email="[email protected]",
description="raspbery pi camera mjpeg streamer",
license="MIT",
package_data={
'package': ['templates', 'static'],
},
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Utilities',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: System :: Shells',
'Environment :: Console'
],
install_requires=INSTALL_REQ,
tests_require=TEST_REQ,
url=GITHUB,
long_description=README,
cmdclass=CMDS,
packages=[PACKAGE_NAME],
# scripts=[
# 'bin/hello.py'
# ]
)
|
walchko/mjpeg
|
setup.py
|
setup.py
|
py
| 1,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "build_utils.BuildCommand.pkg",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "build_utils.BuildCommand",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "build_utils.PublishCommand.pkg",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "build_utils.PublishCommand",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "build_utils.PublishCommand.version",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "build_utils.PublishCommand",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "hostinfo.version.__version__",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "build_utils.PublishCommand",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "build_utils.BuildCommand",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "setuptools.setup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "hostinfo.version.__version__",
"line_number": 23,
"usage_type": "name"
}
] |
36766609482
|
# date: 2021/09/06
# link: https://programmers.co.kr/learn/courses/30/lessons/17680
from collections import deque
def solution(cacheSize, cities):
answer = 0
status = deque()
if cacheSize == 0:
answer = len(cities) * 5
else:
for city in cities:
city = city.upper()
if city in status:
answer += 1
status.remove(city)
else:
answer += 5
if len(status) >= cacheSize:
status.popleft()
status.append(city)
return answer
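# Illustrative check (not part of the original solution): with cacheSize=3,
# ["Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul"] costs 3 misses and
# 3 hits, i.e. 3*5 + 3*1 = 18.
# assert solution(3, ["Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul"]) == 18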
|
jiyoung-dev/Algorithm
|
Kakao 기출문제/캐시.py
|
캐시.py
|
py
| 608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
3653572970
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.color import rgb2lab
from skimage.color import lab2rgb
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
import sys
# representative RGB colours for each label, for nice display
COLOUR_RGB = {
'red': (255, 0, 0),
'orange': (255, 114, 0),
'yellow': (255, 255, 0),
'green': (0, 230, 0),
'blue': (0, 0, 255),
'purple': (187, 0, 187),
'brown': (117, 60, 0),
'pink': (255, 187, 187),
'black': (0, 0, 0),
'grey': (150, 150, 150),
'white': (255, 255, 255),
}
name_to_rgb = np.vectorize(COLOUR_RGB.get, otypes=[np.uint8, np.uint8, np.uint8])
def plot_predictions(model, lum=71, resolution=256):
"""
Create a slice of LAB colour space with given luminance; predict with the model; plot the results.
"""
wid = resolution
hei = resolution
n_ticks = 5
# create a hei*wid grid of LAB colour values, with L=lum
ag = np.linspace(-100, 100, wid)
bg = np.linspace(-100, 100, hei)
aa, bb = np.meshgrid(ag, bg)
ll = lum * np.ones((hei, wid))
lab_grid = np.stack([ll, aa, bb], axis=2)
# convert to RGB for consistency with original input
X_grid = lab2rgb(lab_grid)
# predict and convert predictions to colours so we can see what's happening
y_grid = model.predict(X_grid.reshape((wid*hei, 3)))
pixels = np.stack(name_to_rgb(y_grid), axis=1) / 255
pixels = pixels.reshape((hei, wid, 3))
# plot input and predictions
plt.figure(figsize=(10, 5))
plt.suptitle('Predictions at L=%g' % (lum,))
plt.subplot(1, 2, 1)
plt.title('Inputs')
plt.xticks(np.linspace(0, wid, n_ticks), np.linspace(-100, 100, n_ticks))
plt.yticks(np.linspace(0, hei, n_ticks), np.linspace(-100, 100, n_ticks))
plt.xlabel('A')
plt.ylabel('B')
plt.imshow(X_grid.reshape((hei, wid, 3)))
plt.subplot(1, 2, 2)
plt.title('Predicted Labels')
plt.xticks(np.linspace(0, wid, n_ticks), np.linspace(-100, 100, n_ticks))
plt.yticks(np.linspace(0, hei, n_ticks), np.linspace(-100, 100, n_ticks))
plt.xlabel('A')
plt.imshow(pixels)
# convert an (n, 3) array of RGB values to LAB
def rgb_to_lab(X):
X = pd.DataFrame(X)
#print(X)
X = X.values.reshape(1, -1, 3)
X = rgb2lab(X)
X = X.reshape(-1,3)
return X
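# Note (illustrative): rgb2lab expects an image-shaped array, which is why the
# (n, 3) list of colours is reshaped to a one-row "image" (1, n, 3) and back.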
def main(infile):
#def main():
    data = pd.read_csv(infile)
#print(data)
X = data[['R', 'G', 'B']] # array with shape (n, 3). Divide by 255 so components are all 0-1.
#print(X)
X = X/255
X = X.values.tolist()
#print(X)
#https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
y = data[['Label']].values.ravel() # array with shape (n,) of colour words.
#print(y)
# TODO: build model_rgb to predict y from X.
# TODO: print model_rgb's accuracy score
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
model_rgb = GaussianNB()
model_rgb.fit(X_train, y_train)
y_predicted = model_rgb.predict(X_valid)
print(model_rgb.score(X_valid, y_valid))
# TODO: build model_lab to predict y from X by converting to LAB colour first.
# TODO: print model_lab's accuracy score
#We can create a pipeline model where the first step is a transformer that converts from RGB to LAB, and the second is a Gaussian classifier, exactly as before.
model_lab = make_pipeline(
FunctionTransformer(rgb_to_lab, validate = False),
GaussianNB()
)
model_lab.fit(X_train, y_train)
lab_y_predicted = model_lab.predict(X_valid)
print(model_lab.score(X_valid, y_valid))
plot_predictions(model_rgb)
plt.savefig('predictions_rgb.png')
plot_predictions(model_lab)
plt.savefig('predictions_lab.png')
if __name__ == '__main__':
main(sys.argv[1])
#main()
|
injoon2019/SFU_CMPT353
|
Exercise/e7/colour_bayes.py
|
colour_bayes.py
|
py
| 4,009 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.vectorize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "skimage.color.lab2rgb",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "skimage.color.rgb2lab",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.make_pipeline",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.FunctionTransformer",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 125,
"usage_type": "attribute"
}
] |
74795986426
|
import openpyxl
import tkinter as tk
def add_data_to_excel(roll_number, name):
# Open the Excel file or create a new one if it doesn't exist
try:
workbook = openpyxl.load_workbook('data.xlsx')
except FileNotFoundError:
workbook = openpyxl.Workbook()
# Select the active sheet (default: first sheet)
sheet = workbook.active
# Append the data to the Excel sheet
row = [roll_number, name]
sheet.append(row)
# Save the changes to the Excel file
workbook.save('data.xlsx')
def on_submit():
roll_number = roll_entry.get()
name = name_entry.get()
try:
add_data_to_excel(roll_number, name)
result_label.config(text="Data successfully stored in Excel!", fg="green")
except Exception as e:
result_label.config(text=f"Error occurred: {e}", fg="red")
# Create the tkinter window
root = tk.Tk()
root.title("Data Entry")
# Labels and Entry widgets for roll number and name
roll_label = tk.Label(root, text="Roll Number:")
roll_label.pack()
roll_entry = tk.Entry(root)
roll_entry.pack()
name_label = tk.Label(root, text="Name:")
name_label.pack()
name_entry = tk.Entry(root)
name_entry.pack()
submit_button = tk.Button(root, text="Submit", command=on_submit)
submit_button.pack()
result_label = tk.Label(root, text="", fg="green")
result_label.pack()
# Run the tkinter main loop
root.mainloop()
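# Illustrative check (not part of the original script): read the stored rows back.
# wb = openpyxl.load_workbook('data.xlsx')
# for row in wb.active.iter_rows(values_only=True):
#     print(row)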
|
Chandravarma2004/Push-the-data-given-to-excel-
|
project3.py
|
project3.py
|
py
| 1,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openpyxl.load_workbook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 49,
"usage_type": "call"
}
] |
5508352220
|
import random, os, shutil, yaml, gzip
import pandas as pd
import numpy as np
import prepare.configs as configs
from google.cloud import storage
import pickle
import time
storage_client = storage.Client()
bucket = storage_client.bucket(configs.bucketName)
def encodeConfigs(_confs):
return [
_confs['sim time settings']['time step'],
_confs['sim time settings']['total time'],
_confs['sim time settings']['sampling rate'],
int(_confs['change Na mem']['event happens']),
_confs['change Na mem']['change start'],
_confs['change Na mem']['change finish'],
_confs['change Na mem']['change rate'],
_confs['change Na mem']['multiplier'],
int(_confs['change K mem']['event happens']),
_confs['change K mem']['change start'],
_confs['change K mem']['change finish'],
_confs['change K mem']['change rate'],
_confs['change K mem']['multiplier']
]
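# Illustrative note (not from the original repo): the list above flattens the
# YAML into a fixed 13-element vector, i.e.
# [dt, total_time, sampling_rate,
#  na_event_on, na_start, na_finish, na_rate, na_multiplier,
#  k_event_on, k_start, k_finish, k_rate, k_multiplier]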
def generateDataset():
srcFolder = "storage/processed"
destFolder = "storage/ready"
inputMaxCellsNumber = 250
retryCounter = 0
# Fetch available simulation folders
runsIdxs = []
for blob in bucket.list_blobs(prefix=srcFolder):
folderName = blob.name.split("/")
if (folderName[2] not in runsIdxs and folderName[2] != ""):
runsIdxs.append(folderName[2])
    # Fetch simulation folders already processed
with open("./prepare/processed.txt","r") as f:
processedRunsIdxs = f.readlines()
processedRunsIdxs = [folder.strip() for folder in processedRunsIdxs]
availableFolders = []
for runIdx in runsIdxs:
if (runIdx not in processedRunsIdxs):
availableFolders.append(runIdx)
print("[GENERATE DATASET] Folders {} | Processed {} | Left {}".format(len(runsIdxs), len(processedRunsIdxs), len(availableFolders)))
for i, runFolderIdx in enumerate(availableFolders):
# Keep track of the progress
if (i in [int(len(availableFolders)*0.25), int(len(availableFolders)*0.5), int(len(availableFolders)*0.75)]):
print(">> {} %".format(int(i / len(availableFolders) * 100)))
try:
data = pd.read_csv('gs://{}/{}/{}/simulation.csv'.format(configs.bucketName, srcFolder, runFolderIdx))
print(">> {} | {}".format(runFolderIdx, data['folderName'][0]))
# 1. Download Sim Config File and encode It
fileDest = '/tmp/rawSimConfig.yml'
bucket.blob('storage/raw/{}/configs.yml'.format(data['folderName'][0])).download_to_filename(fileDest)
with open(fileDest, 'r') as stream:
simConfigRaw = yaml.safe_load(stream)
simConfigsEncoded = np.asarray(encodeConfigs(simConfigRaw))
simConfigsEncoded = np.append(simConfigsEncoded, [0]) # Add timestamp information
# 2. Download Sim.betse File and open it ( to extract Membrane permeabilities values)
fileDest = '/tmp/sim_1.betse.gz'
bucket.blob('storage/raw/{}/sim_1.betse.gz'.format(data['folderName'][0])).download_to_filename(fileDest)
with gzip.open(fileDest, "rb") as f:
sim, cells, params = pickle.load(f)
# 3. Generate training examples files. One for each simulation timestep using sim config, sim.betse & vmems
for timestampIdx in range(len(sim.time)):
inputVmem = np.asarray(data[data['timestamp'] == timestampIdx]['vmem'])
outputVmem = np.asarray(data[data['timestamp'] == timestampIdx + 1]['vmem'])
# Update timestamp information
simConfigsEncoded[simConfigsEncoded.shape[0] - 1] = timestampIdx
# 1. Compute cells perms values from cells membranes perms values. From {3, 6} values to 1 (average)
cellsPopulationSize = inputVmem.shape[0]
cells_mems = [[] for cell in range(cellsPopulationSize)]
for memUniqueIdx, cellIdx in enumerate(cells.mem_to_cells):
cells_mems[cellIdx].append(sim.dd_time[timestampIdx][:, memUniqueIdx])
cells_permeabilities = []
for cellMembranes in cells_mems:
cells_permeabilities.append(np.mean(cellMembranes, axis=0))
cells_permeabilities = np.asarray(cells_permeabilities) # N, 4 # K, Na, M-, Proteins-
# concat Vmem values with perms values
inputVmem = np.concatenate((inputVmem.reshape((-1, 1)), cells_permeabilities), axis=1) # N, 5
# concat cells centers to input vector
inputVmem = np.concatenate((inputVmem, cells.cell_centres), axis=1) # N, 7
# Concat env concentrations
env_cc = np.transpose(sim.cc_env_time[timestampIdx])[ : inputVmem.shape[0]] # get only same shape as inputVmem since env cc all the same
inputVmem = np.concatenate((inputVmem, env_cc), axis=1) # N, 11
# Concat cytosilic concentrations
cytosolic_cc = np.transpose(sim.cc_time[timestampIdx])
inputVmem = np.concatenate((inputVmem, cytosolic_cc), axis=1) # N, 15
#Pad Input
'''
TODO:
- Not pad with 0 since it is a possible Vmem value.
'''
if (inputVmem.shape[0] < inputMaxCellsNumber):
inputVmemPad = np.zeros((inputMaxCellsNumber, inputVmem.shape[1]))
inputVmemPad[:inputVmem.shape[0]] = inputVmem
inputVmem = inputVmemPad
outputVmemPad = np.zeros((inputMaxCellsNumber))
outputVmemPad[:outputVmem.shape[0]] = outputVmem
outputVmem = outputVmemPad
#Discard Input
elif (inputVmem.shape[0] > inputMaxCellsNumber):
print("<<ATTENTION>> Found Input with Numbers of cells higher that current Max: {} > {}".format(inputVmem.shape[0], inputMaxCellsNumber))
continue
# Discard example if data
# - Vmem < - 100 || > 100
# - K_env, Na_env, M_env, X_env, K_cc, Na_cc, M_cc, X_cc > 1000
#
if (np.any(inputVmem[:, 0] < -100) or np.any(inputVmem[:, 0] > 100)):
print("Discard example, Vmem {}".format(np.max(np.abs(inputVmem))))
continue
if (np.any(inputVmem[: , 7:] > 1000)):
print("Discard example, Concentration {}".format(np.max(inputVmem[: , 7:])))
continue
if (np.any(outputVmem[:, 0] < -100) or np.any(outputVmem[:, 0] > 100)):
print("Discard example, Vmem Output {}".format(np.max(np.abs(outputVmem))))
continue
#print("inputVmem length: {}".format(inputVmem.shape[0]))
#print("Configs length: {}".format(configs.shape[0]))
#print("outputVmem length: {}".format(outputVmem.shape[0]))
filePath = '/tmp/example.npy'
np.save(filePath, np.asarray([
inputVmem,
simConfigsEncoded,
outputVmem
], dtype="object"))
blob = bucket.blob('{}/{}/{}.npy'.format(destFolder, runFolderIdx, timestampIdx))
blob.upload_from_filename(filePath)
retryCounter = 0
with open("./prepare/processed.txt","a+") as f:
f.write(runFolderIdx + "\n")
# If for some reason processing fails. Handle it. It will not save on the processed.txt allowing to be processed at the next restart
        except Exception:
            print("Handling exception | Sleeping for {}".format(2 ** retryCounter))
time.sleep(2 ** retryCounter) # sleep since may be due to too many requests
retryCounter += 1
continue
|
R-Stefano/betse-ml
|
prepare/utils.py
|
utils.py
|
py
| 7,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "google.cloud.storage.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "prepare.configs.bucketName",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "prepare.configs",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "prepare.configs.bucketName",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "prepare.configs",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 162,
"usage_type": "call"
}
] |
35395847394
|
from django.core.paginator import InvalidPage
class AlphabetGlossary(object):
"""Алфавитный глоссарий"""
def __init__(self, object_list, on=None, num_groups=7):
self.object_list = object_list # список объектов
self.count = len(object_list) # количество объектов в списке
self.max_froups = num_groups # количество алфавитных групп
self.groups = [] # список алфавитных групп
# Словарь, в котором ключ - буква алфавита, а значение - список объектов на эту букву из object_list
chunks = {}
for obj in self.object_list:
if on:
obj_str = str(getattr(obj, on))
else:
obj_str = str(obj)
letter = str.upper(obj_str[0])
if letter not in chunks:
chunks[letter] = []
chunks[letter].append(obj)
# Estimate the expected number of objects per alphabet group
per_group = self.count / num_groups
for letter in chunks:
chunk_len = len(chunks[letter])
if chunk_len > per_group:
per_group = chunk_len
# Distribute the objects across the alphabet groups
current_group = AlphabetGroup(self)
for letter in sorted(chunks.keys()):
sub_list = chunks[letter] # the objects starting with this letter
# Decide whether sub_list fits into the current alphabet group or has to
# be carried over into a new one. A new group is created when:
# - adding sub_list would overflow the current group
# - the current group has less free space than the number of overflowing objects
# - the current group is not empty (otherwise it would mean that len(sub_list) > per_group)
new_group_count = len(sub_list) + current_group.count
if new_group_count > per_group and \
abs(per_group - current_group.count) < abs(per_group - new_group_count) and \
current_group.count > 0:
self.groups.append(current_group)
current_group = AlphabetGroup(self)
current_group.add(sub_list, letter)
# If a non-empty group remains after the loop, add it to the glossary
if current_group.count > 0:
self.groups.append(current_group)
def group(self, num):
"""Возвращает объект алфавитной группы"""
if len(self.groups) == 0:
return None
elif num > 0 and num <= len(self.groups):
return self.groups[num - 1]
else:
raise InvalidPage
@property
def num_groups(self):
"""Возвращает количество алфавитных групп"""
return len(self.groups)
class AlphabetGroup(object):
"""Алфавитная группа глоссария"""
def __init__(self, glossary):
self.glossary = glossary
self.object_list = []
self.letters = []
@property
def count(self):
"""Возвращает количество объектов в группе"""
return len(self.object_list)
@property
def start_letter(self):
"""Возвращает первую букву группы"""
if len(self.letters) > 0:
self.letters.sort(key=str.upper)
return self.letters[0]
else:
return None
@property
def end_letter(self):
"""Возвращает последнюю букву группы"""
if len(self.letters) > 0:
self.letters.sort(key=str.upper)
return self.letters[-1]
else:
return None
@property
def number(self):
"""Возвращает номер группы в глоссарии"""
return self.glossary.groups.index(self) + 1
def add(self, new_list, letter=None):
"""Добавляет список объектов в группу"""
if len(new_list) > 0:
self.object_list = self.object_list + new_list
if letter:
self.letters.append(letter)
def __repr__(self):
"""Возвращает метку группы"""
if self.start_letter == self.end_letter:
return self.start_letter
else:
return '%c-%c' % (self.start_letter, self.end_letter)
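# --- illustrative usage sketch (not part of the original module) ---
# names = ['Anna', 'Boris', 'Carl', 'Dina', 'Egor', 'Fedor', 'Gleb', 'Hugo']
# glossary = AlphabetGlossary(names, num_groups=3)
# for grp in glossary.groups:
#     print(grp, grp.object_list)
# # prints three groups labelled roughly 'A-C', 'D-F', 'G-H'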
|
zarmoose/eastwood_test
|
employees/glossary.py
|
glossary.py
|
py
| 5,105 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 66,
"usage_type": "name"
}
] |
3578166823
|
import os
import torch
import csv
import pandas as pd
from config import FoldersConfig
def txt_to_csv(input_path, output_path):
with open(input_path, 'r') as in_file:
stripped = (line.strip() for line in in_file)
lines = (line.split() for line in stripped if line)
with open(output_path, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerows(lines)
def get_categories_and_path(input_path, output_path):
with open(input_path, 'r') as in_file:
reader = csv.reader(in_file)
next(reader)
row0 = next(reader)
with open(output_path, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(["path", "deep_fashion_category_name", "dataset"])
for r in reader:
split_r = r[0].split('_')[-2]
category = split_r.split('/')[-2]
r.append(r[0])
r.append(category)
r.append('deep_fashion')
writer.writerow( (r[2], r[3], r[4]) )
def add_column_with_article_type_equivalence(deep_fashion, map_to_product_fashion, output_path):
deep_fashion_df = pd.read_csv(deep_fashion, error_bad_lines=False)
map_to_product_fashion_df = pd.read_csv(map_to_product_fashion)
deep_fashion_with_article_type_df = deep_fashion_df.merge(map_to_product_fashion_df, on='deep_fashion_category_name', how='left')
deep_fashion_with_article_type_df['id'] = deep_fashion_with_article_type_df.index + 100000
deep_fashion_with_article_type_df = deep_fashion_with_article_type_df[['id', 'path', 'deep_fashion_category_name', 'product_fashion_article_type', 'dataset']]
deep_fashion_with_article_type_df.columns = ['id', 'path', 'categoryName', 'articleType', 'dataset']
deep_fashion_with_article_type_df.to_csv(output_path, index=False)
def prepare_datasets():
resources = FoldersConfig.RESOURCES_DIR
list_categories_path = resources + 'deep_fashion/list_category_img.txt'
list_categories_output_path = resources + 'deep_fashion/list_category_img.csv'
path_category_dataset = resources + 'deep_fashion/path_category_dataset.csv'
map_to_product_fashion_path = resources + 'map_deep_fashion_to_product_fashion.csv'
deep_fashion_with_article_type_path = resources + 'deep_fashion/deep_fashion_with_article_type.csv'
if not os.path.exists(list_categories_output_path):
txt_to_csv(list_categories_path, list_categories_output_path)
if not os.path.exists(path_category_dataset):
get_categories_and_path(list_categories_output_path, path_category_dataset)
if not os.path.exists(deep_fashion_with_article_type_path):
add_column_with_article_type_equivalence(path_category_dataset, map_to_product_fashion_path, deep_fashion_with_article_type_path)
if __name__ == "__main__":
prepare_datasets()
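# --- note on expected inputs (column names are inferred from the merge above;
# the example values are illustrative only, not from the repo) ---
# map_deep_fashion_to_product_fashion.csv must provide at least the two columns
# used in the merge, for example:
#
#   deep_fashion_category_name,product_fashion_article_type
#   Blouse,Tops
#   Jeans,Jeans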
|
ferran-candela/upc-aidl-2021-image-retrieval
|
imageretrieval/src/prepare_datasets.py
|
prepare_datasets.py
|
py
| 2,898 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "config.FoldersConfig.RESOURCES_DIR",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "config.FoldersConfig",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
}
] |
9324609377
|
from flask import Blueprint, render_template
redspine = Blueprint('redspine',
__name__,
template_folder='./',
static_folder='./',
static_url_path='/')
redspine.display_name = "Redspine"
redspine.published = False
redspine.description = "A red-spine notebook. Art that folds in on itself across pages by bleeding through."
@redspine.route('/')
def _redspine():
return render_template('redspine.html')
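# --- illustrative registration sketch (assumes a surrounding Flask app; not in the original file) ---
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(redspine, url_prefix='/redspine')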
|
connerxyz/exhibits
|
cxyz/exhibits/redspine/redspine.py
|
redspine.py
|
py
| 491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
}
] |
35175789251
|
from django.shortcuts import render
from django.db.models import Prefetch
from .models import Book, Shope
def home(request):
# qs = Post.objects.all()
# # The DB query has not been executed at this point
# x = qs
# # Just assigning variables doesn't do anything
# for x in qs:
# print(x)
# # The query is executed at this point, on iteration
# for x in qs:
# print("%d" % x.id)
# # The query is not executed this time, due to caching
post_qs = Book.objects.order_by('id')
for start, end, total, qs in batch_qs(post_qs):
print("Now processing %s - %s of %s" % (start + 1, end, total))
for post in qs:
print(post.name)
return render(request, 'query_optimization/home.html')
def batch_qs(qs, batch_size=10):
"""
Returns a (start, end, total, queryset) tuple for each batch in the given
queryset.
Usage:
# Make sure to order your querset
article_qs = Article.objects.order_by('id')
for start, end, total, qs in batch_qs(article_qs):
print "Now processing %s - %s of %s" % (start + 1, end, total)
for article in qs:
print article.body
"""
total = qs.count()
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
yield (start, end, total, qs[start:end])
# def home(request):
# books = Book.objects.all().only("name", "create_date")
# for each in books:
# print(each.name)
# print(f"Cache {books._result_cache}")
# return render(request, 'query_optimization/home.html')
# NOTE: this second `home` definition shadows the batched version above; Django will resolve to this one.
def home(request):
queryset = Shope.objects.prefetch_related('book').all()
stores = []
for store in queryset:
books = [book.name for book in store.book.all()]
stores.append({'id': store.id, 'name': store.name, 'books': books})
return render(request, 'query_optimization/home.html')
# Filtered prefetch, adapted to the models above (assumes Book has a `price` field):
queryset = Shope.objects.prefetch_related(
Prefetch('book', queryset=Book.objects.filter(price__range=(250, 300))))
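# Without prefetch_related, the loop in `home` above issues one extra query per
# store (the classic N+1 pattern); with it, Django runs just two queries: one
# for the stores and one IN-query for all their related books.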
|
Azhar-inexture-1/django_practice_models
|
query_optimization/views.py
|
views.py
|
py
| 2,036 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Book.objects.order_by",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Shope.objects.prefetch_related",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.Shope.objects",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.Shope",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "models.Book.objects.filter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 63,
"usage_type": "name"
}
] |
26683410836
|
#!/usr/bin/python3
'''Defines a Base class
'''
import json
from os import path
class Base:
'''Represents a base class
Attributes:
__nb_objects: holds the number of Base instances created
'''
__nb_objects = 0
def __init__(self, id=None):
'''Instantiates a Base object
Args:
id: type int. Defaults to None
'''
if id is not None:
self.id = id
else:
type(self).__nb_objects += 1
self.id = type(self).__nb_objects
@staticmethod
def to_json_string(list_dictionaries):
'''Returns the JSON string representation of list_dictionaries
'''
if list_dictionaries is None:
return '[]'
if type(list_dictionaries) is not list:
raise TypeError('to_json_string argument must be a list of dicts')
for obj in list_dictionaries:
if type(obj) is not dict:
raise TypeError('items in to_json_string arg must be dicts')
return json.dumps(list_dictionaries)
@classmethod
def save_to_file(cls, list_objs):
'''Writes the JSON string representation of list_objs to a file
'''
if list_objs is not None and type(list_objs) is not list:
raise TypeError('list_objs must be of type list')
for obj in (list_objs or []):
if type(obj) is not cls:
raise TypeError('items in list_objs must be of same type as cls')
filename = cls.__name__ + '.json'
with open(filename, 'w', encoding='utf-8') as f:
if list_objs is None:
f.write('[]')
else:
list_dicts = [obj.to_dictionary() for obj in list_objs]
# json.dump(list_dicts, f) achieves same thing as next line
f.write(Base.to_json_string(list_dicts))
@staticmethod
def from_json_string(json_string):
'''Returns the list of JSON string representation
'''
if json_string is None or json_string == '':
return []
if type(json_string) is not str:
raise TypeError('json_string must be str repr of a list of dicts')
return json.loads(json_string)
@classmethod
def create(cls, **dictionary):
'''Returns an instance with all attributes already set
'''
# Create a dummy Rectangle or Square instance
if cls.__name__ == 'Rectangle':
dummy = cls(1, 1)
elif cls.__name__ == 'Square':
dummy = cls(1)
dummy.update(**dictionary)
return dummy
@classmethod
def load_from_file(cls):
'''Returns a list of instances
'''
filename = cls.__name__ + '.json'
if path.exists(filename):
with open(filename, 'r', encoding='utf-8') as f:
json_string = f.read()
list_dict = cls.from_json_string(json_string)
return [cls.create(**d) for d in list_dict]
return []
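# --- illustrative usage sketch (hypothetical subclass; not part of the original module) ---
# A subclass is expected to provide to_dictionary() and update(), e.g.:
#
#   class Square(Base):
#       def __init__(self, size, id=None):
#           super().__init__(id)
#           self.size = size
#       def to_dictionary(self):
#           return {'id': self.id, 'size': self.size}
#       def update(self, **kw):
#           for k, v in kw.items():
#               setattr(self, k, v)
#
#   Square.save_to_file([Square(3)])    # writes Square.json
#   print(Square.load_from_file())      # rebuilds the instances via create()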
|
nzubeifechukwu/alx-higher_level_programming
|
0x0C-python-almost_a_circle/models/base.py
|
base.py
|
py
| 2,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "name"
}
] |
72331238589
|
import gc
import numpy as np
import xarray as xr
import scipy.ndimage.filters as conv
from . import dc_utilities as utilities
from datetime import datetime
####################################################
# | TSM |
####################################################
# 0.0001 for the scale of ls7 data.
def _tsmi(dataset):
return (dataset.red.astype('float64') + dataset.green.astype('float64'))*0.0001 / 2
def tsm(dataset_in, clean_mask=None, no_data=0):
# Create a clean mask from cfmask if the user does not provide one
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
tsm = 3983 * _tsmi(dataset_in)**1.6246
tsm.values[np.invert(clean_mask)] = no_data # Contains data for clear pixels
# Create xarray of data
time = dataset_in.time
latitude = dataset_in.latitude
longitude = dataset_in.longitude
dataset_out = xr.Dataset({'tsm': tsm},
coords={'time': time,
'latitude': latitude,
'longitude': longitude})
return dataset_out
def mask_tsm(dataset_in, wofs):
wofs_criteria = wofs.copy(deep=True).normalized_data.where(wofs.normalized_data > 0.8)
wofs_criteria.values[wofs_criteria.values > 0] = 0
kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])
mask = conv.convolve(wofs_criteria.values, kernel, mode ='constant')
mask = mask.astype(np.float32)
dataset_out = dataset_in.copy(deep=True)
dataset_out.normalized_data.values += mask
dataset_out.total_clean.values += mask
dataset_out.normalized_data.values[np.isnan(dataset_out.normalized_data.values)] = 0
dataset_out.total_clean.values[np.isnan(dataset_out.total_clean.values)] = 0
return dataset_out
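# --- illustrative usage sketch (assumed dataset layout; not from the original repo) ---
# `dataset_in` is expected to be an xarray.Dataset with `red`, `green` and
# `cf_mask` data variables plus time/latitude/longitude coordinates:
#
#   tsm_ds = tsm(landsat_scene)                 # clean mask derived from cf_mask
#   tsm_ds = tsm(landsat_scene, clean_mask=m)   # or pass your own boolean mask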
|
ceos-seo/Data_Cube_v2
|
ui/django_site_v2/data_cube_ui/utils/dc_tsm.py
|
dc_tsm.py
|
py
| 1,835 |
python
|
en
|
code
| 26 |
github-code
|
6
|
[
{
"api_name": "numpy.invert",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xarray.Dataset",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.convolve",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "numpy.isnan",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 48,
"usage_type": "call"
}
] |
9270626576
|
from dataclasses import dataclass
@dataclass
class block:
name: str
seperatorStart: str
seperatorEnd: str
def getBlock(blocks: list, input: list):
strings = list()
index = 0
offsetindex = 0
foundBlock = False
dontAppend = False
for string in input:
dontAppend = False
if (foundBlock and blocks[index].name == "*"):
if (type(strings) == list): strings = dict()
if (string.__contains__(blocks[index].seperatorStart)):
if (offsetindex == 0):
foundBlockName = string.split(blocks[index].seperatorStart)[0].strip().lower()
dontAppend = True
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex == 0): break
else: offsetindex -= 1
if (dontAppend == False and offsetindex > 0):
if (strings.__contains__(foundBlockName)):
strings[foundBlockName].append(string)
else:
strings[foundBlockName] = [string]
elif (foundBlock == True):
if (string.__contains__(blocks[index].seperatorStart)):
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex == 0): break
else: offsetindex -= 1
strings.append(string)
else:
if (string.__contains__(blocks[index].seperatorStart)):
stringSplit = string.split(blocks[index].seperatorStart, 1)
if (stringSplit[0].strip().lower() == blocks[index].name.lower().strip() and offsetindex == 0):
if (len(stringSplit[1].strip()) > 0):
strings.append(stringSplit[1].strip())
if (index + 1 <= len(blocks) - 1 ): index += 1
if (index == len(blocks) - 1): foundBlock = True
else:
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex > 0):
offsetindex -= 1
else: index -= 1
return strings
def getVariable(name: str, blocks: list, seperator: str, input: list):
if (len(blocks) > 0): block = getBlock(blocks, input)
else: block = input
if (name == "*"):
output = dict()
for string in block:
if (string.__contains__(seperator)):
stringSplit = [ stringSplit.strip() for stringSplit in string.split(seperator)]
output[stringSplit[0].lower()] = stringSplit[1]
else:
for string in block:
if (string.__contains__(seperator)):
stringSplit = [ stringSplit.strip() for stringSplit in string.split(seperator)]
if (stringSplit[0].lower() == name.lower()):
output = stringSplit[1]
break
return output
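# --- illustrative usage sketch (the input format below is an assumption, traced by hand) ---
# rooms = block("rooms", "{", "}")
# lines = [
#     "rooms {",
#     "    kitchen {",
#     "        description : A small kitchen",
#     "    }",
#     "}",
# ]
# getBlock([rooms, block("*", "{", "}")], lines)
#   -> {'kitchen': ['        description : A small kitchen']}
# getVariable("description", [rooms], ":", lines)
#   -> 'A small kitchen'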
|
superboo07/TextAdventure
|
TAUtilities.py
|
TAUtilities.py
|
py
| 2,998 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dataclasses.dataclass",
"line_number": 3,
"usage_type": "name"
}
] |
7713977328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
import platform
import netifaces
myApp = Flask(__name__)
@myApp.route('/')
def home():
# note: platform.linux_distribution() was removed in Python 3.8; on newer interpreters use the third-party `distro` package
data = {'user': 'ramy', 'machine':platform.node(), 'os':platform.system(), 'dist':platform.linux_distribution(), 'interfaces':netifaces.interfaces()}
return render_template('index.system.html', title='Home', data=data)
if __name__ == '__main__':
myApp.run(host='0.0.0.0', port=999)
|
RMDHMN/pythonFlash_testing
|
system-template.py
|
system-template.py
|
py
| 469 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "platform.node",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "platform.linux_distribution",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "netifaces.interfaces",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
}
] |
6166872296
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 09:32:16 2017
@author: Francesco
"""
from sklearn.preprocessing import StandardScaler
import numpy as np
import threading as th
import time
import re
import matplotlib.pyplot as plt
movement_kind = ["wrist up",
"wrist down",
"wrist rotation out",
"wrist rotation inside",
"hand open",
"hand closed"]
class up(object):
index_array = []
value_array = []
def __init__(self):
self.index_array.append(0)
def update(self,index,value):
self.index_array.append(index)
self.value_array.append(value)
def load_from_C_formatting(file):
f = open(file,'r').read()
temp = f.split('\n')
Ltemp = len(temp)
state = int(temp[0][-1])
start_index = 0
X = []
Y = []
for j in range(1,Ltemp-1):
new_state = int(temp[j][-1])
if(int(new_state != state)):
Xraw = temp[start_index:j-1]
start_index = j
L = len(Xraw)
X_current = np.zeros((L,8))
i = 0
k = 0
for line in Xraw:
#print(line[:-2])
for value in line[:-2].split(','):
X_current[i,k] = value
k+=1
i+=1
k=0
Y.append(state)
X.append(X_current)
state = new_state
#last start index is the index of the last recording
Xraw = temp[start_index:-1]
L = len(Xraw)
X_current = np.zeros((L,8))
i = 0
k = 0
for line in Xraw:
#print(line[:-2])
for value in line[:-2].split(','):
X_current[i,k] = value
k+=1
i+=1
k=0
Y.append(state)
X.append(X_current)
figures = []
for movement in np.unique(Y):
figures.append(plt.subplots(1,1))
for p in [2,3,4,5,6,7]:
y = X[movement][:,p]
moving_average(y,10)
#figures is a tuple (fig, axes) and we have to
#plot on axes
movement = int(movement)
figures[movement][1].plot(y,color=colorarray[p],label='ch'+str(p))
legend = figures[movement][1].legend(loc='upper left', shadow=True)
figures[movement][0].suptitle(movement_kind[movement])
plt.show()
return (X,Y)
def load_dataset(file):
""" RIFARE INIZIO """
n_channels = 8
f = open(file,'r')
#jump to the second block(the first is corrupted)
while(1):
if(f.read(1) == '-'):
start = f.tell()+2
break
f.seek(start)
#now we are ready to read the first block, which is the first feature actually
#understand the block length, must be equal for each block
dataset = f.read()
n_linee = 0
for line in dataset.split('\n'):
n_linee+=1
if(line == '-'):
n_linee -= 1
break
len_blocco = n_linee+1
#create the structure that will hold the features
#each feature is a matrix n_linee*9 (n_channels + classe movimento)
n_blocks = (len(dataset.split('\n'))-1)/len_blocco
features = np.zeros((n_linee,n_channels+1,int(n_blocks)+1))
i = 0
j = 0
block = 0
for line in dataset.split('\n'):
if(len(line)<5):
block+=1
i = 0
#print(line)
else:
for value in line.split(','):
features[i,j,block] = value
j+=1
#print(line)
j=0
i+=1
return features
def gradient(data,channels):
der = np.zeros((len(data),channels))
for i in range(1,len(data)):
der[i,:] = data[i,:]-data[i-1,:]
return der
def moving_average(data,samp_for_average):
n_windows = int(len(data)/samp_for_average)
for i in range(n_windows):
data[i*samp_for_average:(i+1)*samp_for_average] = np.average(data[i*samp_for_average:(i+1)*samp_for_average])
def open_outfile(file,rep):
f = open(file,'r').read()
lines = f.split('\n')
info_decoded = lines[rep].split('\t')
first_matrix = info_decoded[:-1]
n_cluster = int(info_decoded[-1])
#this code fails when a number has no decimals, because "2." does not match the pattern
#(it expects more digits after the dot); to catch this behaviour the regex lists
#alternative patterns, so "3.0" is recognized as well as "3."
patterns=re.compile(r'-\d+\.\d+|\d+\.\d+|-\d+\.|\d+\.')
#as a note: we search for both positive or negative(minus sign) but the order is important,
#because if -\d+\. was before -\d+\.\d+, the number -2.3 would be recognized as -2.
matrix = np.array(patterns.findall(first_matrix[0]),dtype='f')
for row in first_matrix[1:]: #the first has alread been taken
try:
temp = np.array(patterns.findall(row),dtype='f')
matrix = np.vstack((matrix,temp))
except ValueError:
print("Error:",row)
return (matrix,n_cluster)
#load data
def load_data_into_matrix(file,startline=0,endline=-1,cols=8,mode="signal"):
graph_data = open(file,'r').read()
lines = graph_data.split('\n')
n_channels = cols
n_lines = (len(lines))
vertical_lines = len(lines[startline:endline])
data = np.zeros((vertical_lines,n_channels)) #read all channels (8), plot only the desired
#the last acquisition may be corrupted, sudden termination of serial comm
#the first lines may be corrupted by jittering of the sensors / the serial port reading garbage
if mode == "signal":
i=0
j=0
for line in lines[startline:endline]:
if(len(line)>1):
t = line.split(',')
for value in t:
data[i,j] = t[j]
j+=1
j=0
i+=1
return data
if mode == "encoded":
i=0
j=0
data = np.chararray((n_lines - (startline-endline),n_channels))
for line in lines[startline:endline]:
if(len(line)>1):
t = line.split(',')
for value in t:
data[i,j] = t[j]
j+=1
j=0
i+=1
return data
def unsigned_derivative(x_t,x_tmen1):
return np.abs((x_t - x_tmen1)/x_t)
colorarray = ['b','g','r','c','m','y','k','0.75']
mode = {'polso_piegato_alto':[0,1,4], #extensors (wrist bent up)
'polso_piegato_basso':[2,3,7], #flexors (wrist bent down)
'polso_ruotato_esterno':[0,3], #ulnar muscles (wrist rotated outward)
'polso_ruotato_interno':[1,2], #radial muscles (wrist rotated inward)
'updown':[0,1],
'intest':[2,3],
'tutti':range(8)}
class track(object):
def __init__(self,data):
self.data = data
self.channels = data.shape[1] #number of channels
self.samples = data.shape[0] #number of samples
def set_baseline(self,number_of_samples = 30):
#define the baseline for each channel, with this code we
#don't care about how many channels are there, 2 or 3 or n
#the shape of baseline will be 1xn
#basically this code is doing this: for each column sum the first
#30 values and do the average, the subtract this value from
#all the values
self.baseline = np.sum(self.data[0:number_of_samples,:],axis=0)/number_of_samples
self.data -= self.baseline
def moving_avg(self,samp_for_average):
n_windows = int(len(self.data)/samp_for_average)
for s in range(self.channels):
for i in range(n_windows):
self.data[i*samp_for_average:(i+1)*samp_for_average,s] = np.average(self.data[i*samp_for_average:(i+1)*samp_for_average,s])
def __getitem__(self,index):
return self.data[index[0]][index[1]]
def read_channel(self,channel):
return self.data[:,channel]
def shape(self):
return self.data.shape
class computation(th.Thread):
def __init__(self,name,signal):
th.Thread.__init__(self)
self.signal = signal
self.name = name
def run(self):
#we use alto/basso together with esterno/interno since
#they aren't mutually exclusive movements; the wrist can
#in fact go up while it may be extern/intern, but cannot go
#up while it is down
#we somehow simulate the fact that we are reading a stream of data
#so we don't use all the data together, but once more at each step
#feature extraction: position: baseline and movement: derivative
#t represents time that goes by
""" !!!! MUST BE A MULTIPLE OF 10 !!!! """
windows_length = 10
n_chann = self.signal.shape()[1]
encoder = (lambda Y: 'a' if Y > windows_length/100 else 'b' if Y > -windows_length/100 else 'c')
encoded = ['x']*8
t = 0
outfile = open('thread_data'+self.name+'.txt','w')
#outfilerrr = open('prova_pos'+self.name+'.txt','w')
flag = 1
print("%s: samples %d, channels %d"%(self.name,self.signal.shape()[0],self.signal.shape()[1]) )
try:
while(1):
der_ = self.signal[t,:] - self.signal[t+windows_length,:]
#print(der_[0], self.signal[t,0], self.signal[t+windows_length,0] )
#if deltaY > deltaX .... see the notebook for the full derivation;
#here deltaX is always "window length" because it is the distance at which the points are sampled
i=0
encoded[0] = encoder(der_[0])
outfile.write("%c"%encoded[0])
for i in range(1,8):
encoded[i] = encoder(der_[i])
outfile.write(',')
outfile.write("%c"%encoded[i])
#slide window
t += windows_length #deve essere almeno superiore alla media mobile
#print(line)
flag+=1
outfile.write('\n')
#print(time.time()-start_time)
except IndexError:
outfile.close()
print(flag)
"""
*********************** MAIN **********************
"""
class offline_process(object):
def __init__(self,filename):
""" LOAD DATA """
data = load_data_into_matrix(filename,0,-1,8)
""" DIVIDE INTO MEANINGFUL CHANNELS """
self.polso_updown = track(data[:,mode['tutti']])
#self.polso_intest = track(data[:,mode['intest']])
""" REMOVE BASELINE """
# self.polso_alto_track.set_baseline()
# self.polso_basso_track.set_baseline()
# self.polso_esterno_track.set_baseline()
# self.polso_interno_track.set_baseline()
""" LOW PASS FILTER """
self.polso_updown.moving_avg(10)
#self.polso_intest.moving_avg(30)
""" START TWO THREADS TO COMPUTE"""
self.thread_updown = computation("-encoding",self.polso_updown)
#self.thread_leftright = computation("intest",self.polso_updown)
def __call__(self):
#start a thread for each computation, which is left-right or
#up down
try:
self.thread_updown.start()
#self.thread_leftright.start()
self.thread_updown.join()
#self.thread_leftright.join()
except KeyboardInterrupt:
self.thread_updown.join()
#self.thread_leftright.join()
class occurrence_table(object):
def __init__(self):
self.items = []
self.number_of_occurrence = []
self.l = 0
self.total = 0
def __repr__(self):
return "Object filled with %d items"%self.l
def __str__(self):
for i in range(self.l):
print("%s: %d"%(self.items[i],self.number_of_occurrence[i]))
return "----- End ------ "
def append(self,item):
j=0
for occurrence in self.items:
if occurrence != item:
j=j+1
else:
self.number_of_occurrence[j]+=1
self.total += 1
return
#if we got through the whole for loop without entering the else branch,
#the item is new, so append it
self.items.append(item)
#and record that we added one occurrence of it
self.number_of_occurrence.append(1)
self.l += 1
self.total += 1
#counts and items are two separate lists, but the j-th element
#of number_of_occurrence tells how many times the j-th element
#of items is present
def get(self):
return (self.items,self.number_of_occurrence)
def prob(self):
temp = [1]*self.l
for i in range(self.l):
temp[i] = self.number_of_occurrence[i]/self.total
return temp
if __name__ == "__main__":
p = offline_process("model_updown.txt")
p()
encoded_signal = load_data_into_matrix("thread_data-encoding.txt",mode="encoded")
entropy = lambda p: 0 if p==0 else -p*np.log2(p)
symbols_taken = 3
n_samples = encoded_signal.shape[0]
window_len = 30
start = 0
start_for_plot = 0
channel = encoded_signal.shape[1]
n_steps= window_len - symbols_taken + 1
print("n_steps:",n_steps)
ch = np.zeros((n_samples - window_len,channel),dtype='f')
outfile = open('entropy.txt','w')
while(start < n_samples - window_len):
table = []
for i in range(channel):
table.append(occurrence_table())
window = encoded_signal[start:start+window_len,:]
for i in range(n_steps):
for j in range(channel):
table[j].append(window[i:i+symbols_taken,j].tostring())
entropy_per_channel = [0]*channel #entropy is maximal when all symbols are the same; 3**3 because there are 3 symbols over 3 positions
for j in range(channel):
list_of_prob = table[j].prob()
#print(list_of_prob)
for i in range(len(list_of_prob)):
entropy_per_channel[j] += entropy(list_of_prob[i])
ch[start_for_plot,j] = entropy_per_channel[j]
outfile.write(str(entropy_per_channel[j]))
outfile.write('\t')
start += 1
start_for_plot += 1
outfile.write('\n')
#print(table[0])
outfile.close()
fig2, ax2 = plt.subplots(1,1)
for p in range(channel):
y = ch[:,p]
ax2.plot(y,color=colorarray[p],label='ch'+str(p))
legend = ax2.legend(loc='upper left', shadow=True)
plt.show()
#
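# --- illustrative sketch of occurrence_table (not in the original file) ---
# t = occurrence_table()
# for sym in [b'aab', b'aab', b'abc']:
#     t.append(sym)
# t.get()   # -> ([b'aab', b'abc'], [2, 1])
# t.prob()  # -> [0.666..., 0.333...]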
|
FrancesoM/UnlimitedHand-Learning
|
python_side/utilities.py
|
utilities.py
|
py
| 16,063 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.chararray",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "numpy.log2",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 506,
"usage_type": "name"
}
] |
30354806111
|
import sys
# Enthought library imports
from pyface.qt import QtCore, QtGui
# Local imports
from tvtk.util.gradient_editor import (
ColorControlPoint, ChannelBase, FunctionControl, GradientEditorWidget
)
##########################################################################
# `QGradientControl` class.
##########################################################################
class QGradientControl(QtGui.QWidget):
"""Widget which displays the gradient represented by an GradientTable
object (and does nothing beyond that)"""
def __init__(self, parent=None, gradient_table=None, width=100, height=100):
"""master: panel in which to place the control. GradientTable is the
Table to which to attach."""
super(QGradientControl, self).__init__(parent=parent)
self.resize(width, height)
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
self.width = width
self.height = height
self.gradient_table = gradient_table
assert gradient_table.size == width
self.setMinimumSize(100, 50)
# currently only able to use gradient tables in the same size as the
# canvas width
def paintEvent(self, event):
"""Paint handler."""
super(QGradientControl, self).paintEvent(event)
painter = QtGui.QPainter(self)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0) )
painter.setBrush(brush)
painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
sz = self.size()
width, height = sz.width(), sz.height()
xform = self.gradient_table.scaling_function
start_y = 0
end_y = height
if xform:
# if a scaling transformation is provided, paint the original
# gradient under the scaled gradient.
start_y = height/2
# paint the original gradient as it stands in the table.
color = QtGui.QColor()
for x in range(width):
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(float(x)/(width-1))
color.setRgb(int(255*r), int(255*g), int(255*b))
painter.setPen(color)
brush.setColor(color)
painter.drawLine(x, start_y, x, end_y)
if xform:
# paint the scaled gradient below
end_y = start_y
start_y = 0
for x in range(width):
f = float(x)/(width-1)
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(xform(f))
color.setRgb(int(255*r), int(255*g), int(255*b))
brush.setColor(color)
painter.drawLine(x, start_y, x, end_y)
##########################################################################
# `Channel` class.
##########################################################################
class Channel(ChannelBase):
def paint(self, painter):
"""Paint current channel into Canvas (a canvas of a function control
object).
Contents of the canvas are not deleted prior to painting,
so more than one channel can be painted into the same canvas.
"""
table = self.control.table
# only control points which are active for the current channel
# are to be painted. filter them out.
relevant_control_points = [
x for x in table.control_points if self.name in x.active_channels
]
# lines between control points
color = QtGui.QColor(*self.rgb_color)
painter.setPen(color)
brush = QtGui.QBrush(color)
painter.setBrush(brush)
painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
for k in range( len(relevant_control_points) - 1 ):
cur_point = relevant_control_points[k]
next_point = relevant_control_points[1+k]
painter.drawLine(self.get_pos_index(cur_point.pos),
self.get_value_index(cur_point.color),
self.get_pos_index(next_point.pos),
self.get_value_index(next_point.color))
# the control points themselves.
color = QtCore.Qt.black
painter.setPen(color)
for control_point in relevant_control_points:
x = self.get_pos_index( control_point.pos )
y = self.get_value_index( control_point.color )
radius=6
#print(x,y)
painter.drawRect(x-(radius/2.0), y-(radius/2.0), radius, radius)
painter.drawRect(100,80,6,6)
##########################################################################
# `QFunctionControl` class.
##########################################################################
class QFunctionControl(QtGui.QWidget, FunctionControl):
"""Widget which displays a rectangular regions on which hue, sat, val
or rgb values can be modified. An function control can have one or more
attached color channels."""
# Radius around a control point center in which we'd still count a
# click as "clicked the control point"
control_pt_click_tolerance = 4
ChannelFactory = Channel
def __init__(self, master=None, gradient_table=None, color_space=None,
width=100, height=100):
"""Initialize a function control widget on tkframe master.
Parameters:
-----------
master: The master widget. Note that this widget *must* have
the methods specified in the `AbstractGradientEditorWidget`
interface.
on_table_changed: Callback function taking a bool argument of meaning
'FinalUpdate'. FinalUpdate is true if a control point is dropped,
created or removed and false if the update is due to a control point
currently being dragged (but not yet dropped)
color_space: String which specifies the channels painted on this control.
May be any combination of h,s,v,r,g,b,a in which each channel
occurs only once.
set_status_text: a callback used to set the status text
when using the editor.
"""
kw = dict(
master=master, gradient_table=gradient_table,
color_space=color_space, width=width,
height=height
)
super().__init__(**kw)
self.resize(width, height)
self.setMinimumSize(100, 50)
######################################################################
# Qt event handlers.
######################################################################
def paintEvent(self, event):
super(QFunctionControl, self).paintEvent(event)
painter = QtGui.QPainter(self)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
painter.setBrush(brush)
width, height = self.size().width(), self.size().height()
painter.drawRect(0, 0, width, height)
for channel in self.channels:
channel.paint(painter)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.cur_drag = self.find_control_point(event.x(), event.y())
super(QFunctionControl, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.cur_drag:
self.table_config_changed( final_update = True )
self.cur_drag = None
elif event.button() == QtCore.Qt.RightButton:
# toggle control point. check if there is a control point
# under the mouse. If yes, delete it, if not, create one
# at that point.
cur_control_point = self.find_control_point(event.x(), None)
if cur_control_point:
# found a marker at the click position. delete it and return,
# unless it is a fixed marker (at pos 0 or 1)..
if ( cur_control_point[1].fixed ):
# in this case do nothing. Fixed markers cannot be deleted.
return
self.table.control_points.remove(cur_control_point[1])
self.table_config_changed(final_update=True)
else:
# since there was no marker to remove at the point, we assume
# that we should place one there
new_control_point = ColorControlPoint(active_channels=self.active_channels_string)
new_control_point.set_pos(self.channels[0].get_index_pos(event.x()))
# set new control point color to the color currently present
# at its designated position
new_control_point.color = self.table.get_pos_color(new_control_point.pos)
self.table.insert_control_point(new_control_point)
self.table_config_changed(final_update = True)
if isinstance(event, QtGui.QMouseEvent):
super(QFunctionControl, self).mouseReleaseEvent(event)
def leaveEvent(self, event):
if self.cur_drag:
self.table_config_changed( final_update = True )
self.cur_drag = None
super(QFunctionControl, self).leaveEvent(event)
def resizeEvent(self, event):
sz = self.size()
self.width = sz.width()
self.height = sz.height()
def mouseMoveEvent(self, event):
# currently dragging a control point?
channel = None
point = None
if self.cur_drag:
channel = self.cur_drag[0]
point = self.cur_drag[1]
if ( not point.fixed ):
point.set_pos( channel.get_index_pos(event.x()) )
point.activate_channels( self.active_channels_string )
self.table.sort_control_points()
channel.set_value_index( point.color, event.y() )
self.table_config_changed( final_update = False )
screenX = event.x()
screenY = event.y()
width, height = self.size().width(), self.size().height()
master = self.master
s1, s2 = master.get_table_range()
if channel is not None:
name = self.text_map[channel.name]
pos = s1 + (s2 - s1)*point.pos
val = channel.get_value(point.color)
txt = '%s: (%.3f, %.3f)'%(name, pos, val)
else:
x = s1 + (s2 - s1)*float(screenX)/(width-1)
y = 1.0 - float(screenY)/(height-1)
txt = "position: (%.3f, %.3f)"%(x, y)
self.master.set_status_text(txt)
##########################################################################
# `QGradientEditorWidget` class.
##########################################################################
class QGradientEditorWidget(QtGui.QWidget, GradientEditorWidget):
"""A Gradient Editor widget that can be used anywhere.
"""
def __init__(self, master, vtk_table, on_change_color_table=None,
colors=None):
"""
Parameters:
-----------
vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object
to set.
on_change_color_table : A callback called when the color table
changes.
colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a'
(Default : ['rgb', 'hsv', 'a'])
'rgb' creates one panel to edit Red, Green and Blue
colors.
'hsv' creates one panel to edit Hue, Saturation and
Value.
'h', 's', 'v', 'r', 'g', 'b', 'a' separately
specified creates different panels for each.
"""
kw = dict(master=master, vtk_table=vtk_table,
on_change_color_table=on_change_color_table,
colors=colors)
super().__init__(**kw)
gradient_preview_width = self.gradient_preview_width
gradient_preview_height = self.gradient_preview_height
channel_function_width = self.channel_function_width
channel_function_height = self.channel_function_height
# set up all the panels in a grid
# 6x2 size: 6 rows, 2 columns...
grid = QtGui.QGridLayout()
grid.setColumnStretch(0, 0)
grid.setColumnStretch(1, 1)
# "Gradient Viewer" panel, in position (0,1) for sizer
self.gradient_control = QGradientControl(self,
self.gradient_table,
gradient_preview_width,
gradient_preview_height)
self.setToolTip('Right click for menu')
grid.addWidget(QtGui.QLabel("", self), 0, 0)
grid.addWidget(self.gradient_control, 0, 1)
# Setup the context menu to fire for the Gradient control alone.
gc = self.gradient_control
gc.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
gc.customContextMenuRequested.connect(self.contextMenuEventOnGradient)
# Add the function controls:
function_controls = self.function_controls
editor_data = self.editor_data
row = 1
for color in self.colors:
data = editor_data[color]
control = QFunctionControl(self, self.gradient_table, color,
channel_function_width,
channel_function_height)
txt = data[0] + self.tooltip_text
control.setToolTip(txt)
# Add name of editor (to left side of editor)
grid.addWidget(QtGui.QLabel(data[1], self), row, 0)
# Add the "RGB" control point editor
grid.addWidget(control, row, 1)
function_controls.append(control)
row += 1
# The status text.
self.text = QtGui.QLabel('status', self)
grid.addWidget(self.text, row, 0, 1, 2)
self.setLayout(grid)
self.show()
######################################################################
# `GradientEditorWidget` interface.
######################################################################
def set_status_text(self, msg):
self.text.setText(msg)
######################################################################
# Qt event methods.
######################################################################
def contextMenuEventOnGradient(self, pos):
menu = QtGui.QMenu(self)
saveAction = menu.addAction("Save as")
loadAction = menu.addAction("Load")
action = menu.exec_(self.mapToGlobal(pos))
if action == saveAction:
self.on_save()
elif action == loadAction:
self.on_load()
def on_save(self, event=None):
"""
Open "Save" dialog, write lookuptable to 3 files: ``*.lut``
(lookuptable) ``*.grad`` (gradient table for use with this program),
and ``*.jpg`` (image of the gradient)
"""
wildcard = "Gradient Files (*.grad);;All Files (*.*)"
filename, filter = QtGui.QFileDialog.getSaveFileName(self,
"Save LUT to...",
'',
wildcard)
if filename:
self.save(filename)
def on_load(self, event=None):
"""
Load a ``*.grad`` lookuptable file.
"""
wildcard = "Gradient Files (*.grad);;All Files (*.*)"
filename, filter = QtGui.QFileDialog.getOpenFileName(self,
"Open gradient file...",
'',
wildcard)
if filename:
self.load(filename)
##########################################################################
# `QGradientEditor` class.
##########################################################################
class QGradientEditor(QtGui.QMainWindow):
""" QMainWindow that displays the gradient editor window,
i.e. the thing that contains the gradient display, the function
controls and the buttons.
"""
def __init__(self, vtk_table, on_change_color_table=None, colors=None):
"""Initialize the gradient editor window.
Parameters
----------
vtk_table: Instance of vtkLookupTable, designating the table which is
to be edited.
on_change_color_table: Callback function taking no arguments. Called
when the color table was changed and rendering is
requested.
"""
super(QGradientEditor, self).__init__()
self.setWindowTitle("Color Gradient Editor")
self.widget = QGradientEditorWidget(
master=self, vtk_table=vtk_table,
on_change_color_table=on_change_color_table,
colors=colors
)
self.setCentralWidget(self.widget)
self.resize(300, 500)
self.statusBar()
## Set up the MenuBar
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_action = QtGui.QAction("&Save", self)
file_action.setStatusTip("Save CTF")
file_action.triggered.connect(self.widget.on_save)
file_menu.addAction(file_action)
load_action = QtGui.QAction("&Load", self)
load_action.setStatusTip("Load CTF")
load_action.triggered.connect(self.widget.on_load)
file_menu.addAction(load_action)
quit_action = QtGui.QAction("&Quit", self)
quit_action.setStatusTip("Quit application")
quit_action.triggered.connect(QtGui.QApplication.instance().quit)
file_menu.addAction(quit_action)
help_menu = menu.addMenu("&Help")
action = QtGui.QAction("&Help", self)
action.setStatusTip("Help")
action.triggered.connect(self.on_help)
help_menu.addAction(action)
action = QtGui.QAction("&About", self)
action.setStatusTip("About application")
action.triggered.connect(self.on_about)
help_menu.addAction(action)
def on_help(self, event=None):
""" Help defining the mouse interactions """
message = "Right click to add control points. Left click to move control points"
QtGui.QMessageBox.information(self, 'Help', message)
def on_about(self, event=None):
""" Who wrote the program?"""
message = 'tk Gradient Editor for MayaVi1: Gerald Knizia ([email protected])\n'\
'wxPython port: Pete Schmitt ([email protected])\n'\
'Qt port: Prabhu Ramachandran\n'\
'Enhanced for Mayavi2: Prabhu Ramachandran'
QtGui.QMessageBox.information(self, 'About gradient editor', message)
def main():
from tvtk.util.traitsui_gradient_editor import make_test_table
import sys
table, ctf, otf = make_test_table(lut=False)
# the actual gradient editor code.
def on_color_table_changed():
"""If we had a vtk window running, update it here"""
# print("Update Render Window")
pass
app = QtGui.QApplication.instance()
editor = QGradientEditor(table,
on_color_table_changed,
colors=['rgb', 'a', 'h', 's', 'v'],
)
editor.setWindowTitle("Gradient editor")
editor.show()
sys.exit(app.exec_())
##########################################################################
# Test application.
##########################################################################
if __name__ == "__main__":
main()
|
enthought/mayavi
|
tvtk/util/qt_gradient_editor.py
|
qt_gradient_editor.py
|
py
| 19,600 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
[
{
"api_name": "pyface.qt.QtGui.QWidget",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QPainter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QBrush",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QColor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QColor",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "tvtk.util.gradient_editor.ChannelBase",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QColor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QBrush",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QWidget",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "tvtk.util.gradient_editor.FunctionControl",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QPainter",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QBrush",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QColor",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "tvtk.util.gradient_editor.ColorControlPoint",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QMouseEvent",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QWidget",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "tvtk.util.gradient_editor.GradientEditorWidget",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QGridLayout",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QLabel",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QLabel",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QLabel",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMenu",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QFileDialog.getSaveFileName",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QFileDialog",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QFileDialog.getOpenFileName",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QFileDialog",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMainWindow",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAction",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAction",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 431,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAction",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QApplication.instance",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QApplication",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAction",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAction",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMessageBox.information",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QMessageBox",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMessageBox.information",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QMessageBox",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "tvtk.util.traitsui_gradient_editor.make_test_table",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QApplication.instance",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QApplication",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 483,
"usage_type": "call"
}
] |
41913795360
|
import logging
from functools import partial
from datasets import load_dataset
from transformers import (
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from src.callbacks import ShuffleCallback
from src.config import Config, TrainingArgumentsConfig
from src.data_collator import DataCollatorSpeechSeq2SeqWithPadding
from src.metrics import compute_metrics
from src.prepare_dataset import prepare_dataset
logging.basicConfig(level=logging.INFO)
def train():
config = Config()
training_args_config = TrainingArgumentsConfig()
training_args = Seq2SeqTrainingArguments(**training_args_config.dict())
if config.prepare_dataset:
dataset, _ = prepare_dataset(config)
else:
dataset = load_dataset(config.dataset_name, config.dataset_lang)
logging.info("Training model...")
model = WhisperForConditionalGeneration.from_pretrained(config.model_name)
processor = WhisperProcessor.from_pretrained(
config.model_name, task=config.task, language=config.model_lang
)
compute_metrics_fn = partial(compute_metrics, processor=processor)
trainer = Seq2SeqTrainer(
args=training_args,
model=model,
train_dataset=dataset["train"],
eval_dataset=dataset["validation"],
data_collator=DataCollatorSpeechSeq2SeqWithPadding(processor=processor),
compute_metrics=compute_metrics_fn,
tokenizer=processor,
callbacks=[ShuffleCallback()],
)
trainer.train()
trainer.push_to_hub()
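# Note (added; src/data_collator.py is not shown in this record): assuming it
# follows the standard Hugging Face Whisper fine-tuning recipe, a minimal
# DataCollatorSpeechSeq2SeqWithPadding would pad audio features and token
# labels separately and mask label padding so it is ignored by the loss:
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class DataCollatorSpeechSeq2SeqWithPadding:
#         processor: WhisperProcessor
#
#         def __call__(self, features):
#             # pad log-mel input features to a uniform batch shape
#             inputs = [{"input_features": f["input_features"]} for f in features]
#             batch = self.processor.feature_extractor.pad(inputs, return_tensors="pt")
#             # pad tokenized transcripts; -100 marks positions the loss skips
#             labels = [{"input_ids": f["labels"]} for f in features]
#             labels_batch = self.processor.tokenizer.pad(labels, return_tensors="pt")
#             batch["labels"] = labels_batch["input_ids"].masked_fill(
#                 labels_batch.attention_mask.ne(1), -100
#             )
#             return batch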
if __name__ == "__main__":
train()
|
Giorgi-Sekhniashvili/geo_whisper
|
train.py
|
train.py
|
py
| 1,605 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "src.config.Config",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "src.config.TrainingArgumentsConfig",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transformers.Seq2SeqTrainingArguments",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "src.prepare_dataset.prepare_dataset",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datasets.load_dataset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "transformers.WhisperForConditionalGeneration.from_pretrained",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "transformers.WhisperForConditionalGeneration",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "transformers.WhisperProcessor.from_pretrained",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "transformers.WhisperProcessor",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "src.metrics.compute_metrics",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "transformers.Seq2SeqTrainer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "src.data_collator.DataCollatorSpeechSeq2SeqWithPadding",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "src.callbacks.ShuffleCallback",
"line_number": 45,
"usage_type": "call"
}
] |
8499225984
|
from booleano.exc import InvalidOperationError
from booleano.operations.operands import Operand
__all__ = ["String", "Number", "Arithmetic", "Set"]
class Constant(Operand):
"""
Base class for constant operands.
The only operation that is common to all the constants is equality (see
:meth:`equals`).
Constants don't rely on the context -- they are constant!
.. warning::
This class is available as the base for the built-in :class:`String`,
:class:`Number` and :class:`Set` classes. User-defined constants aren't
supported, but you can assign a name to a constant (see
:term:`binding`).
"""
operations = {'equality'}
def __init__(self, constant_value):
"""
:param constant_value: The Python value represented by the Booleano
constant.
:type constant_value: :class:`object`
"""
self.constant_value = constant_value
def to_python(self, context):
"""
Return the value represented by this constant.
"""
return self.constant_value
def equals(self, value, context):
"""
Check if this constant equals ``value``.
"""
return self.constant_value == value
def check_equivalence(self, node):
"""
Make sure constant ``node`` and this constant are equivalent.
:param node: The other constant which may be equivalent to this one.
:type node: Constant
:raises AssertionError: If the constants are of different types or
represent different values.
"""
super(Constant, self).check_equivalence(node)
assert node.constant_value == self.constant_value, \
u'Constants %s and %s represent different values' % (self,
node)
class String(Constant):
u"""
Constant string.
These constants only support equality operations.
.. note:: **Membership operations aren't supported**
Although both sets and strings are item collections, the former is
unordered and the latter is ordered. If they were supported, there would be
some ambiguities to sort out, because users would expect the following
operation results:
- ``"ao" ⊂ "hola"`` is false: If strings were also sets, then the
resulting operation would be ``{"a", "o"} ⊂ {"h", "o", "l", "a"}``,
which is true.
- ``"la" ∈ "hola"`` is true: If strings were also sets, then the
resulting operation would be ``{"l", "a"} ∈ {"h", "o", "l", "a"}``,
which would be an *invalid operation* because the first operand must
be an item, not a set. But if we make an exception and take the first
operand as an item, the resulting operation would be
``"la" ∈ {"h", "o", "l", "a"}``, which is not true.
The solution to the problems above would involve some magic which
contradicts the definition of a set: Take the second operand as an
*ordered collection*. But it'd just cause more trouble, because both
operations would be equivalent!
Also, there would be other issues to take into account (or not), like
case-sensitivity.
Therefore, if this functionality is needed, developers should create
functions to handle it.
"""
def __init__(self, string):
"""
:param string: The Python string to be represented by this Booleano
string.
:type string: :class:`basestring`
``string`` will be converted to :class:`unicode`, so it doesn't
have to be a :class:`basestring` initially.
"""
import sys
if sys.version_info >= (3, 0):
string = str(string)
else:
string = unicode(string)
super(String, self).__init__(string)
def equals(self, value, context):
"""Turn ``value`` into a string if it isn't a string yet"""
value = str(value)
return super(String, self).equals(value, context)
def __unicode__(self):
"""Return the Unicode representation of this constant string."""
return u'"%s"' % self.constant_value
def __hash__(self):
return id(self)
def __repr__(self):
"""Return the representation for this constant string."""
return '<String "%s">' % self.constant_value.encode("utf-8")
class ArithmeticVariable(object):
def __init__(self, number, namespace, namespace_separator=":"):
self.namespace_separator = namespace_separator
self.parsed_results = number
self._namespace = namespace
self.variables = {}
self.__define_variables()
number = self.flatten(self.parsed_results)
self.__full_expression = "".join(number)
def __str__(self):
number = self.flatten(self.parsed_results)
return "".join(number)
def __define_variables(self):
number = self.parsed_results
temp = []
for n in number:
t = self.__get_variable_names(n)
if isinstance(t, list):
temp.extend(t)
else:
temp.append(t)
self.required_variables = temp
temp = {}
for v in self.required_variables:
for k, val in v.items():
temp[k] = val
self.required_variables = temp
def __get_variable_names(self, number):
from pyparsing import ParseResults
import re
temp = []
if isinstance(number, ParseResults):
for n in number:
t = self.__get_variable_names(n)
if isinstance(t, list):
temp.extend(t)
else:
temp.append(t)
return temp
elif len(re.findall("[a-zA-Z" + self.namespace_separator + "]+", number)) > 0:
var = str(number).split(self.namespace_separator)
variable_namespaces = var[0:-1]
variable_name = var[-1]
return {str(number): self._namespace.get_object(variable_name, variable_namespaces)}
return temp
@classmethod
def flatten(cls, s):
from pyparsing import ParseResults
if s == []:
return s
if isinstance(s[0], ParseResults):
return cls.flatten(s[0]) + cls.flatten(s[1:])
return s[:1] + cls.flatten(s[1:])
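# Illustrative note (added; example values are hypothetical): flatten
# recursively unnests pyparsing ParseResults, so a parse tree equivalent to
# [['a:x', '+'], ['2', '*', '3']] flattens to ['a:x', '+', '2', '*', '3'],
# which __init__ then joins into the full expression string.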
def replace(self, num, context, namespace=True):
for k, v in self.required_variables.items():
if namespace and self.namespace_separator not in k:
continue
num = num.replace(k, str(v.to_python(context)))
return num
def evaluate(self, context):
number = self.__full_expression
# Replace all variables with numbers
# First replace variables with namespaces defined to avoid clobbering
number = self.replace(number, context)
# Then replace variables with no namespace
number = self.replace(number, context, False)
number = number.replace("^", "**")
from booleano import SafeEval
answer = SafeEval.eval_expr(number)
return answer
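# Illustrative note (added; names and context are hypothetical): if
# required_variables maps "acct:balance" to a variable whose
# to_python(context) returns 4, the expression "acct:balance ^ 2" is rewritten
# to "4 ** 2" and SafeEval.eval_expr returns 16. Namespaced names are
# substituted before bare ones so that a variable named "balance" cannot
# clobber the longer "acct:balance" token.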
class Arithmetic(Constant):
"""
Numeric constant.
These constants support inequality operations; see :meth:`greater_than`
and :meth:`less_than`.
"""
operations = Constant.operations | {'inequality'}
def __init__(self, number, namespace, namespace_separator=":"):
"""
:param number: The number to be represented, as a Python object.
:type number: :class:`object`
``number`` is converted into a :class:`float` internally, so it can
be a :class:`string <basestring>` initially.
"""
self.namespace_separator = namespace_separator
super(Arithmetic, self).__init__(ArithmeticVariable(number, namespace, namespace_separator))
def equals(self, value, context):
"""
Check if this numeric constant equals ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant equals")
return super(Arithmetic, self).equals(self._to_number(value), context)
def greater_than(self, value, context):
"""
Check if this numeric constant is greater than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant gt")
return self.constant_value > self._to_number(value)
def less_than(self, value, context):
"""
Check if this numeric constant is less than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant lt")
return self.constant_value < self._to_number(value)
def to_python(self, context):
return self.constant_value.evaluate(context)
def _to_number(self, value):
"""
Convert ``value`` to a Python float and return the new value.
:param value: The value to be converted into float.
:return: The value as a float.
:rtype: float
:raises InvalidOperationError: If ``value`` can't be converted.
"""
print("Constant to_num")
try:
return float(value)
except ValueError:
raise InvalidOperationError('"%s" is not a number' % value)
def __unicode__(self):
"""Return the Unicode representation of this constant number."""
print("constant unicode")
return str(self.constant_value)
def __repr__(self):
"""Return the representation for this constant number."""
return '<Arithmetic %s>' % self.constant_value
class Number(Constant):
"""
Numeric constant.
These constants support inequality operations; see :meth:`greater_than`
and :meth:`less_than`.
"""
operations = Constant.operations | {'inequality'}
def __init__(self, number):
"""
:param number: The number to be represented, as a Python object.
:type number: :class:`object`
``number`` is converted into a :class:`float` internally, so it can
be a :class:`string <basestring>` initially.
"""
number = float(number)
super(Number, self).__init__(number)
def equals(self, value, context):
"""
Check if this numeric constant equals ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return super(Number, self).equals(self._to_number(value), context)
def greater_than(self, value, context):
"""
Check if this numeric constant is greater than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return self.constant_value > self._to_number(value)
def less_than(self, value, context):
"""
Check if this numeric constant is less than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return self.constant_value < self._to_number(value)
def _to_number(self, value):
"""
Convert ``value`` to a Python float and return the new value.
:param value: The value to be converted into float.
:return: The value as a float.
:rtype: float
:raises InvalidOperationError: If ``value`` can't be converted.
"""
try:
return float(value)
except ValueError:
raise InvalidOperationError('"%s" is not a number' % value)
def __unicode__(self):
"""Return the Unicode representation of this constant number."""
return str(self.constant_value)
def __repr__(self):
"""Return the representation for this constant number."""
return '<Number %s>' % self.constant_value
class Set(Constant):
"""
Constant sets.
These constants support membership operations; see :meth:`contains` and
:meth:`is_subset`.
"""
operations = Constant.operations | {"inequality", "membership"}
def __init__(self, *items):
"""
:raises booleano.exc.InvalidOperationError: If at least one of the
``items`` is not an operand.
"""
for item in items:
if not isinstance(item, Operand):
raise InvalidOperationError('Item "%s" is not an operand, so '
'it cannot be a member of a set' %
item)
super(Set, self).__init__(set(items))
def to_python(self, context):
"""
Return a set made up of the Python representation of the operands
contained in this set.
"""
items = set(item.to_python(context) for item in self.constant_value)
return items
def equals(self, value, context):
"""Check if all the items in ``value`` are the same of this set."""
value = set(value)
return value == self.to_python(context)
def less_than(self, value, context):
"""
Check if this set has fewer items than the number represented in
``value``.
:raises InvalidOperationError: If ``value`` is not an integer.
"""
value = self._to_int(value)
return len(self.constant_value) < value
def greater_than(self, value, context):
"""
Check if this set has more items than the number represented in
``value``.
:raises InvalidOperationError: If ``value`` is not an integer.
"""
value = self._to_int(value)
return len(self.constant_value) > value
def belongs_to(self, value, context):
"""
Check that this constant set contains the ``value`` item.
"""
for item in self.constant_value:
try:
if item.equals(value, context):
return True
except InvalidOperationError:
continue
return False
def is_subset(self, value, context):
"""
Check that the ``value`` set is a subset of this constant set.
"""
for item in value:
if not self.belongs_to(item, context):
return False
return True
def check_equivalence(self, node):
"""
Make sure set ``node`` and this set are equivalent.
:param node: The other set which may be equivalent to this one.
:type node: Set
:raises AssertionError: If ``node`` is not a set or it's a set
with different elements.
"""
Operand.check_equivalence(self, node)
unmatched_elements = list(self.constant_value)
assert len(unmatched_elements) == len(node.constant_value), \
u'Sets %s and %s do not have the same cardinality' % \
(unmatched_elements, node)
# Checking that each element is represented by a mock operand:
for element in node.constant_value:
for key in range(len(unmatched_elements)):
if unmatched_elements[key] == element:
del unmatched_elements[key]
break
assert 0 == len(unmatched_elements), \
u'No match for the following elements: %s' % unmatched_elements
def __unicode__(self):
"""Return the Unicode representation of this constant set."""
elements = [str(element) for element in self.constant_value]
elements = u", ".join(elements)
return "{%s}" % elements
def __repr__(self):
"""Return the representation for this constant set."""
elements = [repr(element) for element in self.constant_value]
elements = ", ".join(elements)
if elements:
elements = " " + elements
return '<Set%s>' % elements
@classmethod
def _to_int(cls, value):
"""
Convert ``value`` to an integer if possible.
:param value: The value to be verified.
:return: ``value`` as integer.
:rtype: int
:raises InvalidOperationError: If ``value`` is not an integer.
This is a workaround for Python < 2.6, where floats didn't have the
``.is_integer()`` method.
"""
try:
value_as_int = int(value)
is_int = value_as_int == float(value)
except (ValueError, TypeError):
is_int = False
if not is_int:
raise InvalidOperationError("To compare the amount of items in a "
"set, the operand %s has to be an "
"integer" % repr(value))
return value_as_int
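# Minimal usage sketch (added; hypothetical values, not part of the original
# module):
#
#     s = Set(String("red"), String("green"))
#     s.belongs_to("red", {})    # True: String.equals coerces via str()
#     s.greater_than(1, {})      # True: the set holds two items
#     s.is_subset(["red"], {})   # True: every element of ["red"] is in s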
|
MikeDombo/Stock_Backtester
|
booleano/operations/operands/constants.py
|
constants.py
|
py
| 15,020 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "booleano.operations.operands.Operand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pyparsing.ParseResults",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pyparsing.ParseResults",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "booleano.SafeEval.eval_expr",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "booleano.SafeEval",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "{'ParseResults': 'pyparsing.ParseResults', 're': 're', 'SafeEval': 'booleano.SafeEval'}",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "booleano.exc.InvalidOperationError",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "booleano.exc.InvalidOperationError",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "booleano.operations.operands.Operand",
"line_number": 416,
"usage_type": "argument"
},
{
"api_name": "booleano.exc.InvalidOperationError",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "booleano.exc.InvalidOperationError",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "booleano.operations.operands.Operand.check_equivalence",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "booleano.operations.operands.Operand",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "booleano.exc.InvalidOperationError",
"line_number": 542,
"usage_type": "call"
}
] |
86366418129
|
import numpy as np
import matplotlib.pyplot as plt
def radial_kernel(x0, X, tau):
return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))
def local_regression(x0, X, Y, tau):
# add bias term
x0 = np.r_[1, x0]
X = np.c_[np.ones(len(X)), X]
# fit model: normal equations with kernel
xw = X.T * radial_kernel(x0, X, tau)
beta = np.linalg.pinv(xw @ X) @ xw @ Y
# predict value
return x0 @ beta
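# Math note (added for clarity; standard locally weighted regression): the
# Gaussian weights are w_i = exp(-||x_i - x0||^2 / (2 * tau^2)), and since
# xw == X^T W (each sample column of X^T scaled by its weight), the line above
# computes beta = pinv(X^T W X) @ (X^T W Y), i.e. the weighted normal
# equations, so the prediction at the query point is simply x0 @ beta.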
def generate_data():
n = 1000
X = np.linspace(-3, 3, num=n)
Y = np.log(np.abs(X ** 2 - 1) + .5)
# Y = np.sin(X) + 0.3 * np.random.randn(n) #
plt.scatter(X, Y, s=5, color="green")
plt.savefig("LocalWeightedLinearRegression2-DataInitial.png")
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
# jitter X
X += np.random.normal(scale=.1, size=n)
plt.scatter(X, Y, s=5, color="green")
plt.savefig("LocalWeightedLinearRegression2-DatawithGitter.png")
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
return X, Y
def create_plot(X, Y, tau):
fig, axes = plt.subplots(3, 2, figsize=(16, 8), sharex=False, sharey=False, dpi=120)
# plt.subplots(3, 2 ) means display data in 3 rows and 2 columns
# Plot each axes
for i, ax in enumerate(axes.ravel()):
domain = np.linspace(-3, 3, num=40)
prediction = [local_regression(x0, X, Y, tau[i]) for x0 in domain]
ax.scatter(X, Y, s=5, color="green", label="actual")
ax.scatter(domain, prediction, s=5, color='red', label="prediction")
ax.set(
title="tau=" + str(tau[i]),
xlabel='X',
ylabel='Y',
)
ax.legend(loc='best')
plt.suptitle('Locally Weighted Linear Regression', size=10, color='blue')
plt.savefig("LocalWeightedLinearRegression2-DataAndPrediction.png")
return plt
if __name__ == "__main__":
X, Y = generate_data()
tau = [800, 10, .1, .01, .08, .9]
myplot = create_plot(X, Y, tau)
myplot.show()
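# Note (added): tau sets the kernel bandwidth. Large values such as 800 weight
# all points almost equally, approaching an ordinary global linear fit, while
# very small values such as .01 track nearby points closely and can overfit noise.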
|
MitaAcharya/MachineLeaning
|
AndrewNG/Week2/Week2_LWR_Extra/LocalWeightedLinearRegression.py
|
LocalWeightedLinearRegression.py
|
py
| 2,053 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.exp",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.c_",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.pinv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.random.normal",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
}
] |