| seq_id (string, 4–11 chars) | text (string, 113–2.92M chars) | repo_name (string, 4–125 chars, nullable) | sub_path (string, 3–214 chars) | file_name (string, 3–160 chars) | file_ext (string, 18 classes) | file_size_in_byte (int64, 113–2.92M) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, 0–179k, nullable) | dataset (string, 3 classes) | pt (string, 78 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
74453123386
|
def solve(n, s, d, m):
    # Count contiguous length-m segments of s whose sum equals d.
    combos = 0
    i = 0
    # Stop so that every slice s[i:i + m] has exactly m elements; shorter
    # trailing slices could otherwise sum to d and be miscounted.
    while i <= len(s) - m:
        if sum(s[i:(i + m)]) == d:
            combos += 1
        i += 1
    return combos
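# Quick sanity check (sample values assumed, not from the original script):
# of the length-2 segments of [1, 2, 1, 3, 2], only [1, 2] and [2, 1] sum to 3.
assert solve(5, [1, 2, 1, 3, 2], 3, 2) == 2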
n = int(input().strip())
s = list(map(int, input().strip().split(' ')))
d, m = input().strip().split(' ')
d, m = [int(d), int(m)]
result = solve(n, s, d, m)
print(result)
|
em1382/hackerrank
|
algorithms/implementation/the-birthday-bar.py
|
the-birthday-bar.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8017799306
|
import tkinter as tk
from tkinter import ttk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy as np
class GraphPlt():
def __init__(self):
"""インスタンス化。Figureの作成
"""
self.fig = Figure(figsize=(6,6))
def plt(self, title="plot graph",topLevel=True,kind_grid=None, is_legend = False,is_twin=False, rotate_xlim=0,show_bar=20):
"""作成したグラフのプロット
Args:
title (str, optional): ウィンドウのタイトル. Defaults to "plot graph".
topLevel (bool, optional): ウィンドウをトップレベルにするか. Defaults to True.
kind_grid (str, optional): グリッドの有無と方向. Defaults to None.
is_legend (bool, optional): 凡例の有無. Defaults to False.
is_twin (bool, optional): 2軸グラフか否か. Defaults to False.
rotate_xlim (int, optional): x軸ラベルの角度. Defaults to 0.
show_bar (int, optional): スクロールのスケール. Defaults to 20.
"""
if topLevel:
frame = tk.Toplevel()
else:
frame = tk.Tk()
frame.focus_force()
frame.title(title)
canvasFrame = tk.Frame(frame)
canvasFrame.pack(side=tk.TOP)
controlFrame = tk.Frame(frame)
controlFrame.pack(side=tk.BOTTOM)
canvas = FigureCanvasTkAgg(self.fig, canvasFrame)
tmp = canvas.get_tk_widget()
tmp.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
showbars = show_bar
        if kind_grid is not None:
self.ax.grid(axis = kind_grid,linestyle="--")
if is_legend:
self.ax.legend()
if is_twin:
self.ax.legend(self.twin_label_handle[0][0]+self.twin_label_handle[1][0],self.twin_label_handle[0][1]+self.twin_label_handle[1][1],loc=2,borderaxespad=0.)
self.fig.autofmt_xdate(rotation=rotate_xlim)
def draw_plot(pos):
pos_ = float(pos)
self.ax.set_xlim(pos_-1, pos_+showbars+1)
canvas.draw()
y_scale = ttk.Scale(controlFrame, from_=0.0, to=self.data_len-showbars, length=480, orient=tk.HORIZONTAL, command=draw_plot)
y_scale.pack(fill=tk.X)
btn = tk.Button(controlFrame, text="閉じる", command = frame.destroy)
btn.pack()
draw_plot(0)
def make_graph(self, x, y, graph_type,color = None,marker = None,label=None):
"""最初のグラフの作成
Args:
x (list)): x軸のデータ
y (list): y軸のデータ
graph_type (str)): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
self.axes_set()
self.handler, self.label = self.graph_set(self.ax,x, y, graph_type,color,marker,label)
def add_graph(self, x, y, graph_type,color = None,marker=None,label=None):
"""グラフの追加
Args:
x (list): x軸のデータ
y (list): y軸のデータ
graph_type (str)): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
self.graph_set(self.ax,x, y, graph_type,color,marker,label)
def twin_axes(self):
"""2軸グラフにする
"""
self.ax_t = self.ax.twinx()
self.twin_label_handle=list()
self.twin_label_handle.append((self.handler,self.label))
def twin_add_graph(self,x,y,graph_type,color=None, marker = None,label=None):
"""2軸グラフに新たなデータの追加
Args:
x (list): x軸のデータ
y (list)): y軸のデータ
graph_type (str): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
handler, labels=self.graph_set(self.ax_t,x,y,graph_type,color,marker,label)
self.twin_label_handle.append((handler,labels))
def axes_set(self):
"""新たなグラフの追加
"""
self.ax = self.fig.add_subplot(111)
def graph_set(self,ax,x,y,graph_type,color,marker,label):
"""グラフのセット
Args:
ax (plt.axes): 追加先のグラフ
x (list): x軸のデータ
y (list): y軸のデータ
graph_type (str): グラフの種類
color (str): グラフの色
label (str): グラフのラベル
Returns:
[type]: [description]
"""
g = self._graph_select(ax,graph_type)
if graph_type=="line":
g(x,y,color=color,label=label,marker=marker)
else:
g(x,y,color=color,label=label)
self.data_len = len(x)
handler, label = ax.get_legend_handles_labels()
return handler, label
def set_span_xlim(self,span,last=0,first=0,delta = None):
"""x軸の設定
Args:
span (int): 間隔
last (int, optional): 最後の数値. Defaults to 0.
first (int, optional): 最初のデータ. Defaults to 0.
delta (int, optional): last-firstの値. Defaults to None.
"""
        if delta is None:
xtick = [first]
xtick.extend([i * span for i in range(1, (( last-first -1)//span) + 1)])
else:
xtick = [1]
xtick.extend([i * span for i in range(1, ((delta-1)//span) + 1)])
self.ax.set_xticks(xtick)
def _graph_select(self,ax,graph_type):
"""graph_typeから適するグラフメソッドを返す
Args:
ax (plt.axes): 対象のグラフ
graph_type (str): グラフタイプ.line:折れ線, bar:棒
Returns:
plt.axes.method: 適切なグラフメソッド
"""
if graph_type == "bar":
return ax.bar
elif graph_type == "line":
return ax.plot
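# Minimal usage sketch (made-up data; not part of the original module):
if __name__ == "__main__":
    gp = GraphPlt()
    gp.make_graph(list(range(100)), [i ** 0.5 for i in range(100)], "line", label="sqrt")
    gp.plt(title="demo", topLevel=False, is_legend=True)
    tk.mainloop()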
|
komepi/komepiTkinter
|
komepiTkinter/GraphPlt.py
|
GraphPlt.py
|
py
| 6,239 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18801821077
|
import requests
requests.packages.urllib3.disable_warnings() # noqa
url = "https://api.github.com/repos/milvus-io/milvus/actions/workflows"
payload = {}
token = "" # your token
headers = {
"Authorization": f"token {token}",
}
response = requests.request("GET", url, headers=headers, data=payload)
def analysis_workflow(workflow_name, workflow_response):
"""
Used to count the number of successes and failures of jobs in the chaos test workflow,
so as to understand the robustness of different components(each job represents a component).
"""
workflow_id = [w["id"] for w in workflow_response.json()["workflows"] if workflow_name in w["name"]][0]
runs_response = requests.request("GET", f"https://api.github.com/repos/milvus-io/milvus/actions/workflows/{workflow_id}/runs", headers=headers, data=payload, verify=False)
workflow_runs = [r["id"] for r in runs_response.json()["workflow_runs"] if r["status"] == "completed" and r["event"] == "schedule"]
results = {}
for run in workflow_runs:
job_url = f"https://api.github.com/repos/milvus-io/milvus/actions/runs/{run}/jobs"
job_response = requests.request("GET", job_url, headers=headers, data=payload, verify=False)
for r in job_response.json()["jobs"]:
if r["name"] not in results:
results[r["name"]] = {"success": 0, "failure": 0}
if r["status"] == "completed" and r["conclusion"] == "success":
results[r["name"]]["success"] += 1
elif r["status"] == "completed" and r["conclusion"] != "success":
results[r["name"]]["failure"] += 1
return results
for workflow in ["Pod Kill"]:
result = analysis_workflow(workflow, response)
print(f"{workflow}:")
for k, v in result.items():
print(f"{k} success: {v['success']}, failure: {v['failure']}")
print("\n")
|
milvus-io/milvus
|
tests/python_client/chaos/scripts/workflow_analyse.py
|
workflow_analyse.py
|
py
| 1,895 |
python
|
en
|
code
| 24,190 |
github-code
|
6
|
36396180382
|
import pandas as pd
import pymysql
from pyecharts.charts import Bar, Line, Pie
from pyecharts import options as opts
conn = pymysql.connect(host="localhost", user="root",
passwd="123456", port=3306, db="591")
cursor = conn.cursor()
sql = 'select * from rent'
cursor.execute(sql)
result = cursor.fetchall()
df = pd.read_sql(sql, conn)
location_group = df.groupby(["location"])
location_com = location_group["price"].aggregate(["mean", "count"])
location_com.reset_index(inplace=True)
location_message = location_com.sort_values("count", ascending=False)
locationr = location_message["location"]
l1 = location_message["count"]
l2 = location_message["mean"]
l2 = ['%.2f' % i for i in l2.tolist()]
# print(l2)
# print(location_com)
# #%%
# # Bar + line chart: rental counts & average rent
bar = (
Bar(init_opts=opts.InitOpts(width="1200px", height="500px", theme='light'))
.add_xaxis(locationr.tolist())
.add_yaxis("房屋出租數", l1.tolist())
.extend_axis(
yaxis=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 元"), interval=10000
)
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="台北市各行政區出租房數&平均租金"),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 間")),
)
)
line = Line().add_xaxis(locationr.tolist()).add_yaxis(
"平均租金", l2, yaxis_index=1)
bar.overlap(line)
bar.render("台北市各行政區出租房數&平均租金.html")
# Donut chart: floor area of rental listings
square_info = df['size'].astype(float)
print(type(square_info[0]))
bins = [0, 10, 20, 40, 60, 100, 300]
level = ['0-10坪', '10-20坪', '20-40坪', '40-60坪', '60-100坪', '100-300坪']
square_stage = pd.cut(square_info, bins=bins,
labels=level).value_counts().sort_index()
attr = square_stage.index.tolist()
v1 = square_stage.values.tolist()
pie = (
Pie()
.add("", [list(z)for z in zip(attr, v1)], radius=[80, 150])
    # add percentage labels
.set_series_opts(label_opts=opts.LabelOpts(is_show=True, formatter="{d}%"))
.set_global_opts(title_opts=opts.TitleOpts(title="台北市出租房房屋面積分布",
pos_left="center",
pos_bottom="center"),
legend_opts=opts.LegendOpts(pos_left="left",
orient="vertical"),
))
pie.render("台北市出租房房屋面積分布.html")
# #%%
# Stacked bar chart: rental types
mask = (df["kind"] != "其他") # 過濾掉其他
location_group = df[mask].groupby("location")
location_values = [k[0] for k in location_group]
gp = df[mask].sort_values("location").groupby("kind")
# reindex by location_values: value_counts() orders by frequency, which would
# otherwise misalign each series with the x-axis categories
s1 = gp.get_group("獨立套房")["location"].value_counts().reindex(location_values, fill_value=0).tolist()
s2 = gp.get_group("分租套房")["location"].value_counts().reindex(location_values, fill_value=0).tolist()
s3 = gp.get_group("雅房")["location"].value_counts().reindex(location_values, fill_value=0).tolist()
s4 = gp.get_group("整層住家")["location"].value_counts().reindex(location_values, fill_value=0).tolist()
# s5 = gp.get_group("車位")["location"].value_counts().tolist()
bar = (
Bar(init_opts=opts.InitOpts(width="1200px", height="500px", theme='light'))
.add_xaxis(location_values)
.add_yaxis("獨立套房", s1, stack="stack1")
.add_yaxis("分租套房", s2, stack="stack1")
.add_yaxis("雅房", s3, stack="stack1")
.add_yaxis("整層住家", s4, stack="stack1")
# .add_yaxis("車位", s5, stack="stack1")
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="房型分類"),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 間")),
)
)
bar.render("台北市各行政區出租房型分類.html")
# -----------------------------------------
# Line/area chart: average rent per ping (坪)
df["location"] = df["location"].apply(
lambda x: "".join([i for i in x if not i.isdigit()]))
df = (df[df["kind"].isin(["雅房", "整層住家", "獨立套房", "分租套房", "車位"])].
groupby(["location", "kind"])[["price", "size"]]
.sum()
.reset_index()
.sort_values("location"))
df.insert(4, column="average", value=df["price"]//df["size"])
line = (
Line()
.add_xaxis(location_values)
.add_yaxis("雅房", df.groupby("kind").get_group("雅房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.5))
.add_yaxis("整層住家", df.groupby("kind").get_group("整層住家")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.4))
.add_yaxis("獨立套房", df.groupby("kind").get_group("獨立套房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.3))
.add_yaxis("分租套房", df.groupby("kind").get_group("分租套房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.2))
# .add_yaxis("車位", df.groupby("kind").get_group("車位")["average"],
# areastyle_opts=opts.AreaStyleOpts(opacity=0.1))
.set_global_opts(title_opts=opts.TitleOpts(title="各房型單坪租金"))
)
line.render("單坪租金圖.html")
# ------------------------------------------------------
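# Housekeeping sketch (assumption: nothing later reuses the connection):
cursor.close()
conn.close()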
|
dichotomania/project
|
visual.py
|
visual.py
|
py
| 5,336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24847039251
|
import pygame
class Character():
"""Create a character - inherits from the Sprite class"""
def __init__(self, screen, game_settings):
self.screen = screen
self.settings = game_settings
# Load the character image and get its rect
self.image = pygame.image.load("images/Robot.png")
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start the character at the bottom-left of the screen
self.rect.left = self.screen_rect.left
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the character's center
self.centerx = float(self.rect.centerx)
self.bottom = float(self.rect.bottom)
self.start_y = 0.0
# Movement flags
self.moving_right = False
self.moving_left = False
self.jumping = False
self.falling = False
# Get width of character for entering and exiting screen
self.width = self.rect.width
self.height = self.rect.height
def update(self):
"""Update the character's position on the screen"""
# Update center for float value
if self.moving_right and self.rect.right < self.screen_rect.right + self.width:
self.centerx += self.settings.movement_speed
if self.moving_left and self.rect.left > 0:
self.centerx -= self.settings.movement_speed
if self.jumping:
self.bottom -= self.settings.jumping_speed
if self.bottom <= (self.start_y - self.settings.jump_height):
self.jumping = False
if self.falling:
self.bottom += self.settings.gravity
# Update rect using new centerx and bottom values
self.rect.centerx = self.centerx
self.rect.bottom = self.bottom
def reset(self):
"""Reset the character to the bottom-left of the screen"""
self.centerx = self.width / 2
self.bottom = self.screen_rect.bottom
def blitme(self):
"""Draw the character at its present location"""
self.screen.blit(self.image, self.rect)
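# Minimal usage sketch (the settings object is assumed; requires images/Robot.png):
if __name__ == "__main__":
    import types
    pygame.init()
    demo_settings = types.SimpleNamespace(movement_speed=1.5, jumping_speed=3.0,
                                          jump_height=120, gravity=2.0)
    demo_screen = pygame.display.set_mode((800, 600))
    robot = Character(demo_screen, demo_settings)
    robot.blitme()
    pygame.display.flip()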
|
YorkshireStu83/Flatpormer
|
character.py
|
character.py
|
py
| 2,187 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29285921224
|
from rply import LexerGenerator # type: ignore
# lexer for the "calc" command
lg = LexerGenerator()
lg.add('NUMBER', r'[0-9]+[\.]?[0-9]*') # number token
lg.add('ADDITION', r'\+') # operator tokens
lg.add('SUBTRACTION', r'-')
lg.add('MULTIPLICATION', r'\*')
lg.add('EXPONENT', r'\^')
lg.add('DIVISION', r'\/')
lg.add('FIRST_BRACKETS', r'\[') # grouping tokens
lg.add('SECOND_BRACKETS', r'\]')
lg.add('FIRST_BRACES', r'\{')
lg.add('SECOND_BRACES', r'\}')
lg.add('FIRST_PARENTHESIS', r'\(')
lg.add('SECOND_PARENTHESIS', r'\)')
lg.ignore(r'\s+') # ignore whitespace (raw string avoids an invalid-escape warning)
l = lg.build()
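# Quick demonstration (input made up; rply token streams are iterable):
if __name__ == "__main__":
    for tok in l.lex("3 + 4 * (2 - 1)"):
        print(tok.gettokentype(), tok.getstr())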
|
Chrovo/Productivity
|
cogs/utils/lexer.py
|
lexer.py
|
py
| 582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20143910497
|
import cv2
def decode_fourcc(fourcc):
# Decodes the fourcc value to get the 4 chars identifying it
fourcc_int = int(fourcc)
# Print the value of fourcc
print("int value of fourcc: {}".format(fourcc_int))
#return "".join([chr((fourcc_int >> 8 * i) & 0xFF) for i in range(4)])
fourcc_decode = ""
for i in range(4):
int_value = fourcc_int >> 8 * i & 0xFF
print("int value: {}".format(int_value))
fourcc_decode += chr(int_value)
return fourcc_decode
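# Usage sketch (assumes a camera at index 0 is available):
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        print("fourcc: {}".format(decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC))))
    cap.release()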
|
Raylow00/OpenCV-Tutorials
|
1_Opencv_basics/6_decode_fourcc.py
|
6_decode_fourcc.py
|
py
| 524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34622698240
|
S = input()
UCPC = "UCPC"
j = 0
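# Greedy scan: advance j each time the next needed character of "UCPC"
# appears in S, i.e. test whether "UCPC" is a subsequence of S.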
for i in range(len(S)):
if S[i] == UCPC[j]:
j += 1
if j == 4:
print("I love UCPC")
quit()
print("I hate UCPC")
|
ktan9811/BOJ
|
10000~/15904.py
|
15904.py
|
py
| 187 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22021920480
|
'''
Jessica Dutton
Store Manager (user entity)
'''
from google.cloud import datastore
from flask import Blueprint, Flask, request, make_response
import json
import constants
from google.oauth2 import id_token
from google.auth.transport import requests
client = datastore.Client()
bp = Blueprint('store_manager', __name__, url_prefix='/store_managers')
@bp.route('', methods=['GET'])
def store_managers_get():
if request.method == 'GET':
store_managers = []
if 'application/json' not in request.accept_mimetypes:
response = {"Error": "Response type must be JSON"}
res = make_response(response)
res.mimetype = 'application/json'
res.status_code = 406
return res
# get the dvds
query = client.query(kind=constants.dvds)
results = list(query.fetch())
for dvd in results:
if dvd['store_manager'] not in store_managers:
store_managers.append(dvd['store_manager'])
if len(store_managers) != 0:
response = {"List of store manager IDs": store_managers}
else:
response = {"Results" : "No store managers currently registered in app"}
return (response, 200)
else:
return 'Method not recognized'
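# Hedged wiring sketch (standard Flask blueprint registration; the app factory
# lives elsewhere in the original project):
# app = Flask(__name__)
# app.register_blueprint(bp)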
|
jdutt25/dvd_store
|
store_manager.py
|
store_manager.py
|
py
| 1,310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36807817632
|
import numpy as np
from scipy import signal
from scipy.signal import butter, lfilter
def createSpec(signals, sr, n_channels=22):
# Reference: https://github.com/MesSem/CNNs-on-CHB-MIT, DataSetToSpectrogram
n_channels = min(n_channels, 22)
for channel in range(n_channels):
y = signals[channel]
Pxx = signal.spectrogram(y, nfft=sr, fs=sr, return_onesided=True, noverlap=128)[2]
Pxx = np.delete(Pxx, np.s_[117:123 + 1], axis=0)
Pxx = np.delete(Pxx, np.s_[57:63 + 1], axis=0)
Pxx = np.delete(Pxx, 0, axis=0)
result = ((10 * np.log10(Pxx).T - (10 * np.log10(Pxx)).T.mean(axis=0)) / (10 * np.log10(Pxx)).T.std(axis=0))
if channel == 0:
spect = np.zeros((n_channels, *result.shape))
result = np.nan_to_num(result)
spect[channel] = result
return spect
# Band-stop filter
def butter_bandstop_filter(data, lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
i, u = butter(order, [low, high], btype='bandstop')
y = lfilter(i, u, data)
return y
# High-pass filter
def butter_highpass_filter(data, cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='high', analog=False)
y = lfilter(b, a, data)
return y
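# Minimal usage sketch (synthetic data; 256 Hz sampling assumed, as in CHB-MIT):
if __name__ == "__main__":
    sr = 256
    fake_signals = np.random.randn(22, sr * 10)  # 22 channels, 10 seconds
    print(createSpec(fake_signals, sr).shape)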
|
koike-ya/eeglibrary
|
eeglibrary/src/chb_mit_cnn_spectrogram.py
|
chb_mit_cnn_spectrogram.py
|
py
| 1,357 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4456513686
|
from dataclasses import dataclass
from fnmatch import fnmatch
from typing import cast, Optional
from urllib.parse import urljoin
from .config import Config
from .download import DownloaderMixIn, HTTPXDownloaderMixIn
from .exceptions import (
ArbitrarySoftwareAttack,
DownloadNotFoundError,
EndlessDataAttack,
FreezeAttack,
InconsistentTargetError,
MixAndMatchAttack,
NoConsistentSnapshotsError,
RollbackAttack,
TargetNotFoundError,
)
from .models.common import (
Comparable,
Filepath,
Hash,
Hashes,
Length,
Positive,
Rolename,
Rolenames,
Url,
Version,
)
from .models.metadata import (
Metadata,
Root,
Signed,
Snapshot,
TargetFile,
Targets,
ThresholdOfPublicKeys,
TimeSnap,
Timestamp,
)
from .readers import JSONReaderMixIn, ReaderMixIn
from .writers import WriterMixIn
@dataclass
class Target:
path: Filepath
target: TargetFile
# This is a Repository, not a Client, because I want to make it clear that you
# can compose these objects to traverse multiple Repositories.
class Repository(WriterMixIn, DownloaderMixIn, ReaderMixIn):
"""A class to abstractly handle the TUF client application workflow for a
single repository.
Do not instantiate this class."""
ROOT_ROLENAME = "root"
SNAPSHOT_ROLENAME = "snapshot"
TARGETS_ROLENAME = "targets"
TIMESTAMP_ROLENAME = "timestamp"
def __init__(self, config: Config):
super().init_downloader()
self.config = config
self.__refresh()
def close(self) -> None:
self.config.close()
super().close_downloader()
def __check_expiry(self, signed: Signed) -> None:
if signed.expires <= self.config.NOW:
raise FreezeAttack(f"{signed}: {signed.expires} <= {self.config.NOW}")
def __check_hashes(self, abspath: Filepath, expected: Hashes) -> None:
if not self.check_hashes(abspath, expected):
raise ArbitrarySoftwareAttack(f"{abspath} != {expected}")
def __check_length(self, abspath: Filepath, expected: Length) -> None:
if not self.check_length(abspath, expected):
raise EndlessDataAttack(f"{abspath} > {expected} bytes")
def __check_rollback(self, prev: Comparable, curr: Comparable) -> None:
if prev > curr:
raise RollbackAttack(f"{prev} > {curr}")
def __check_signatures(
self, role: ThresholdOfPublicKeys, metadata: Metadata
) -> None:
if not role.verified(metadata.signatures, metadata.canonical):
raise ArbitrarySoftwareAttack(f"{metadata.signed}")
def __check_version(self, signed: Signed, timesnap: TimeSnap) -> None:
if signed.version != timesnap.version:
raise MixAndMatchAttack(f"{signed.version} != {timesnap.version}")
def __local_metadata_filename(self, rolename: Rolename) -> Filepath:
return self.join_path(self.config.metadata_cache, self.role_filename(rolename))
def __local_targets_filename(self, relpath: Filepath) -> Filepath:
return self.join_path(self.config.targets_cache, relpath)
def __remote_metadata_filename(
self, rolename: Rolename, version: Version
) -> Filepath:
return f"{version.value}.{self.role_filename(rolename)}"
def __remote_metadata_path(self, relpath: Filepath) -> Url:
return urljoin(self.config.metadata_root, relpath)
def __remote_targets_path(self, relpath: Filepath, _hash: Hash) -> Url:
dirname, basename = self.split_path(relpath)
basename = f"{_hash}.{basename}"
relpath = self.join_path(dirname, basename)
return urljoin(self.config.targets_root, relpath)
def __refresh(self) -> None:
"""Refresh metadata for root, timestamp, and snapshot so that we have a
consistent snapshot of the repository."""
try:
self.__load_root()
self.__update_root()
self.__update_timestamp()
self.__update_snapshot()
except Exception:
self.close()
raise
def __load_root(self) -> None:
"""5.1. Load the trusted root metadata file."""
# NOTE: we must parse the root metadata file on disk in order to get
# the keys to verify itself in the first place.
filename = self.__local_metadata_filename(self.ROOT_ROLENAME)
metadata = self.read_from_file(filename)
# FIXME: the following line is purely to keep mypy happy; otherwise,
# it complains that the .signed.root attribute does not exist.
metadata.signed = cast(Root, metadata.signed)
# Verify self-signatures on previous root metadata file.
self.__check_signatures(metadata.signed.root, metadata)
# NOTE: the expiration of the trusted root metadata file does not
# matter, because we will attempt to update it in the next step.
# We do not support non-consistent-snapshot repositories.
if not metadata.signed.consistent_snapshot:
raise NoConsistentSnapshotsError
# Now that we have verified signatures, throw them away, and set the
# current root to the actual metadata of interest.
self.__root = metadata.signed
def __update_root(self) -> None:
"""5.2. Update the root metadata file."""
# 5.2.1. Let N denote the version number of the trusted root metadata
# file.
prev_root = self.__root
curr_root = prev_root
n = curr_root.version
# 5.2.8. Repeat steps 5.2.1 to 5.2.8.
for _ in range(self.config.MAX_ROOT_ROTATIONS):
# 5.2.2. Try downloading version N+1 of the root metadata file.
n += 1
remote_filename = self.__remote_metadata_filename(self.ROOT_ROLENAME, n)
remote_path = self.__remote_metadata_path(remote_filename)
try:
tmp_file = self.download(
remote_path, self.config.MAX_ROOT_LENGTH, self.config
)
except DownloadNotFoundError:
break
self.__check_length(tmp_file, self.config.MAX_ROOT_LENGTH)
# 5.2.3. Check for an arbitrary software attack.
metadata = self.read_from_file(tmp_file)
metadata.signed = cast(Root, metadata.signed)
self.__check_signatures(curr_root.root, metadata)
self.__check_signatures(metadata.signed.root, metadata)
# 5.2.4. Check for a rollback attack.
if metadata.signed.version != n:
raise RollbackAttack(
f"{metadata.signed.version} != {n} in {remote_path}"
)
# 5.2.5. Note that the expiration of the new (intermediate) root
# metadata file does not matter yet.
# 5.2.6. Set the trusted root metadata file to the new root metadata
# file.
curr_root = metadata.signed
# 5.2.9. Check for a freeze attack.
self.__check_expiry(curr_root)
if prev_root < curr_root:
# 5.2.11. Set whether consistent snapshots are used as per the
# trusted root metadata file.
# NOTE: We violate the spec in checking this *before* deleting local
# timestamp and/or snapshot metadata, which I think is reasonable.
if not curr_root.consistent_snapshot:
raise NoConsistentSnapshotsError
# 5.2.10. If the timestamp and / or snapshot keys have been rotated,
# then delete the trusted timestamp and snapshot metadata files.
if (
self.__root.timestamp != curr_root.timestamp
or self.__root.snapshot != curr_root.snapshot
):
filename = self.__local_metadata_filename(self.SNAPSHOT_ROLENAME)
if self.file_exists(filename):
self.rm_file(filename)
filename = self.__local_metadata_filename(self.TIMESTAMP_ROLENAME)
if self.file_exists(filename):
self.rm_file(filename)
# 5.2.7. Persist root metadata.
# NOTE: We violate the spec in persisting only *after* checking
# everything, which I think is reasonable.
self.mv_file(tmp_file, self.__local_metadata_filename(self.ROOT_ROLENAME))
self.__root = curr_root
def __get_prev_metadata(self, rolename: Rolename) -> Optional[Metadata]:
filename = self.__local_metadata_filename(rolename)
if self.file_exists(filename):
return self.read_from_file(filename)
return None
def __update_timestamp(self) -> None:
"""5.3. Download the timestamp metadata file."""
role_filename = self.role_filename(self.TIMESTAMP_ROLENAME)
remote_path = self.__remote_metadata_path(role_filename)
tmp_file = self.download(
remote_path, self.config.MAX_TIMESTAMP_LENGTH, self.config
)
self.__check_length(tmp_file, self.config.MAX_TIMESTAMP_LENGTH)
# 5.3.1. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Timestamp, curr_metadata.signed)
self.__check_signatures(self.__root.timestamp, curr_metadata)
# 5.3.2. Check for a rollback attack.
prev_metadata = self.__get_prev_metadata(self.TIMESTAMP_ROLENAME)
if prev_metadata:
prev_metadata.signed = cast(Timestamp, prev_metadata.signed)
self.__check_rollback(prev_metadata.signed, curr_metadata.signed)
self.__check_rollback(
prev_metadata.signed.snapshot, curr_metadata.signed.snapshot
)
# 5.3.3. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.3.4. Persist timestamp metadata.
self.mv_file(tmp_file, self.__local_metadata_filename(self.TIMESTAMP_ROLENAME))
self.__timestamp = curr_metadata.signed
def __update_snapshot(self) -> None:
"""5.4. Download snapshot metadata file."""
prev_metadata = self.__get_prev_metadata(self.SNAPSHOT_ROLENAME)
obsolete = (
not prev_metadata
or prev_metadata.signed.version < self.__timestamp.snapshot.version
)
local_filename = self.__local_metadata_filename(self.SNAPSHOT_ROLENAME)
length = self.__timestamp.snapshot.length or self.config.MAX_SNAPSHOT_LENGTH
# Download metadata only if not cached or if it is obsolete.
if not obsolete:
tmp_file = local_filename
else:
remote_filename = self.__remote_metadata_filename(
self.SNAPSHOT_ROLENAME, self.__timestamp.snapshot.version
)
remote_path = self.__remote_metadata_path(remote_filename)
tmp_file = self.download(remote_path, length, self.config)
self.__check_length(tmp_file, length)
# 5.4.1. Check against timestamp role's snapshot hash.
if self.__timestamp.snapshot.hashes:
self.__check_hashes(tmp_file, self.__timestamp.snapshot.hashes)
# 5.4.2. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Snapshot, curr_metadata.signed)
self.__check_signatures(self.__root.snapshot, curr_metadata)
# 5.4.3. Check against timestamp role's snapshot version.
self.__check_version(curr_metadata.signed, self.__timestamp.snapshot)
# 5.4.4. Check for a rollback attack.
if prev_metadata:
prev_metadata.signed = cast(Snapshot, prev_metadata.signed)
for filename, prev_timesnap in prev_metadata.signed.targets.items():
curr_timesnap = curr_metadata.signed.targets.get(filename)
if not curr_timesnap:
raise RollbackAttack(
f"{filename} was in {prev_metadata.signed.version} but missing in {curr_metadata.signed.version}"
)
self.__check_rollback(prev_timesnap, curr_timesnap)
# 5.4.5. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.4.6. Persist snapshot metadata.
if obsolete:
self.mv_file(tmp_file, local_filename)
self.__snapshot = curr_metadata.signed
def __preorder_dfs(
self,
targets: Targets,
target_relpath: Filepath,
visited: Rolenames,
counter: Positive,
) -> Optional[TargetFile]:
target_file = targets.targets.get(target_relpath)
if target_file:
return target_file
else:
for rolename, delegation in targets.delegations.items():
if rolename not in visited:
for path in delegation.paths:
if fnmatch(target_relpath, path):
target_file = self.__update_targets(
visited,
counter + 1,
rolename,
delegation.role,
target_relpath,
)
if target_file or delegation.terminating:
return target_file
return None
def __update_targets(
self,
visited: Rolenames,
counter: Positive,
rolename: Rolename,
role: ThresholdOfPublicKeys,
target_relpath: Filepath,
) -> Optional[TargetFile]:
"""5.5. Download the top-level targets metadata file."""
if rolename in visited or counter > self.config.MAX_PREORDER_DFS_VISITS:
return None
visited.add(rolename)
role_filename = self.role_filename(rolename)
timesnap = self.__snapshot.targets.get(role_filename)
if not timesnap:
raise MixAndMatchAttack(f"{rolename} not in {self.__snapshot}")
prev_metadata = self.__get_prev_metadata(rolename)
obsolete = not prev_metadata or prev_metadata.signed.version < timesnap.version
local_filename = self.__local_metadata_filename(rolename)
length = timesnap.length or self.config.MAX_TARGETS_LENGTH
# Download metadata only if not cached or if it is obsolete.
if not obsolete:
tmp_file = local_filename
else:
remote_filename = self.__remote_metadata_filename(
rolename, timesnap.version
)
remote_path = self.__remote_metadata_path(remote_filename)
tmp_file = self.download(remote_path, length, self.config)
self.__check_length(tmp_file, length)
# 5.5.1. Check against snapshot role's targets hash.
if timesnap.hashes:
self.__check_hashes(tmp_file, timesnap.hashes)
# 5.5.2. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Targets, curr_metadata.signed)
self.__check_signatures(role, curr_metadata)
# 5.5.3. Check against snapshot role's targets version.
self.__check_version(curr_metadata.signed, timesnap)
# 5.5.4. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.5.5. Persist targets metadata.
if obsolete:
self.mv_file(tmp_file, local_filename)
# 5.5.6. Perform a pre-order depth-first search for metadata about the
# desired target, beginning with the top-level targets role.
return self.__preorder_dfs(
curr_metadata.signed, target_relpath, visited, counter
)
def __get_target(self, target_file: TargetFile, relpath: Filepath) -> Filepath:
for _hash in target_file.hashes.values():
remote_path = self.__remote_targets_path(relpath, _hash)
try:
return self.download(remote_path, target_file.length, self.config)
except DownloadNotFoundError:
continue
raise InconsistentTargetError(f"{relpath}")
# FIXME: consider using a context manager for cleanup.
def get(self, relpath: Filepath) -> Target:
"""Use this function to securely download and verify an update."""
try:
# 5.6. Verify the desired target against its targets metadata.
target_file = self.__update_targets(
set(), Positive(1), self.TARGETS_ROLENAME, self.__root.targets, relpath
)
# 5.6.2. Otherwise (if there is targets metadata about this target),
# download the target, and verify that its hashes match the targets
# metadata.
if target_file:
local_path = self.__local_targets_filename(relpath)
file_exists = self.file_exists(local_path)
# Download target only if not cached.
if file_exists:
tmp_file = local_path
else:
tmp_file = self.__get_target(target_file, relpath)
self.__check_length(tmp_file, target_file.length)
self.__check_hashes(tmp_file, target_file.hashes)
if not file_exists:
self.mv_file(tmp_file, local_path)
return Target(local_path, target_file)
except Exception as e:
self.close()
raise TargetNotFoundError(f"{relpath}") from e
else:
# 5.6.1. If there is no targets metadata about this target, abort
# the update cycle and report that there is no such target.
self.close()
raise TargetNotFoundError(f"{relpath}")
class JSONRepository(Repository, HTTPXDownloaderMixIn, JSONReaderMixIn):
"""Instantiate this class to read canonical JSON TUF metadata from a
remote repository."""
pass
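# Hedged usage sketch (Config fields inferred from the attribute accesses
# above, e.g. metadata_root / targets_root / metadata_cache; construction
# details live in the project's config module):
# repo = JSONRepository(Config(...))
# target = repo.get("path/to/target.txt")
# print(target.path)
# repo.close()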
|
trishankatdatadog/tuf-on-a-plane
|
src/tuf_on_a_plane/repository.py
|
repository.py
|
py
| 18,018 |
python
|
en
|
code
| 4 |
github-code
|
6
|
10795571321
|
"""
As starter code: given a ball of fixed size, an initial velocity, and a constant drag,
demonstrate the ball's motion, with each loop iteration matching real elapsed time.
Starting from this initial code, we show step by step how to refine it into the finished feature.
"""
import sys, pygame
import os.path
import random
import time
import math
#pygame.font.init()
#myfont = pygame.font.SysFont('Comic Sans MS', 30)
def hit_A(x, y):
return y <= 10
def hit_B(x, y):
return x >= 590
def hit_C(x, y):
return y >= 470
def hit_D(x, y):
return x <= 10
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
print(BASE_DIR)
pygame.init()
screen = pygame.display.set_mode((600, 480))
ball_color = [255, 0, 0]
x, y = random.randint(10, 590), random.randint(10, 390)
speed = random.randint(100, 300)
u = random.random()*math.pi
f = 20
t = time.time()
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill((255, 255, 255))
t1 = time.time()
dt = t1-t
d = speed*dt
dx, dy = d*math.cos(u), d*math.sin(u)
x += dx
y += dy
if hit_B(x, y):
u = math.pi - u
elif hit_C(x,y):
u = 2*math.pi - u
elif hit_D(x,y):
u = 3*math.pi - u
elif hit_A(x, y):
u = 2*math.pi - u
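    # Reflection rules: side walls (B, D) map u -> pi - u (3*pi - u is the same
    # angle mod 2*pi); top/bottom walls (A, C) map u -> -u, i.e. 2*pi - u.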
speed -= f*dt
if speed < 0:
speed = 0
    # move one step in the forward direction
pygame.draw.circle(screen, ball_color, (int(x), int(y)), 10)
#textsurface = myfont.render(f'{dx} {dy}', False, (0, 0, 0))
#screen.blit(textsurface,(x,y))
pygame.display.flip()
t = t1
|
sillyemperor/pygame-study
|
rebound-ball-walls.py
|
rebound-ball-walls.py
|
py
| 1,622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19346820391
|
import tweepy
from app.config import TWITTER_API_KEY,TWITTER_API_KEY_SECRET,get_logger
class TwitterAPI:
def __init__(self,access_token,access_token_secret) -> None:
self.api_key = TWITTER_API_KEY
self.api_key_secret = TWITTER_API_KEY_SECRET
self.access_token = access_token
self.access_token_secret = access_token_secret
self.logger = get_logger()
auth = tweepy.OAuth1UserHandler(self.api_key,self.api_key_secret,self.access_token,self.access_token_secret)
self.api = tweepy.API(auth,parser=tweepy.parsers.JSONParser())
def get_user_profile(self,user_id,screen_name):
try:
profile = self.api.get_user(user_id=user_id,screen_name=screen_name,include_entities=1)
except Exception as e:
self.logger.error(f"Something went wrong while getting user profile: {str(e)}")
return None
        return {
"user_id":profile['id_str'],
"name":profile['name'],
"description":profile['description'],
"screen_name":profile['screen_name'],
"profile_image_url":profile['profile_image_url']
}
def get_user_feed(self, page_num):
feed = []
try:
for page in tweepy.Cursor(self.api.home_timeline, tweet_mode="extended").pages(page_num):
feed = page
except Exception as e:
self.logger.error(f"Something went wrong while fetching Feed: {str(e)}")
return None
return feed
# geocode format --> '18.520430,73.856743,25km' (string)
def get_searched_tweets(self, query, page ,geocode):
searched_tweets=None
try:
for pageResult in tweepy.Cursor(self.api.search_tweets, q=query, geocode=geocode, tweet_mode="extended").pages(page):
searched_tweets = pageResult
except Exception as e:
self.logger.error(f"Something went wrong while searching Topic: {str(e)}")
return None
return searched_tweets
def upload_media(self,filename):
try:
media = self.api.media_upload(filename=filename)
except Exception as e:
self.logger.error(f"Something went wrong while uploading the File: {str(e)}")
return None
return media['media_id_string']
def create_tweet(self,text,media_ids):
try:
new_tweet = self.api.update_status(status =text,media_ids = media_ids)
except Exception as e:
self.logger.error(f"Something went wrong while creating new tweet: {str(e)}")
return None
return new_tweet
def reply_tweet(self, text, media_ids, tweet_id):
try:
reply = self.api.update_status(status = text, media_ids = media_ids, in_reply_to_status_id = tweet_id , auto_populate_reply_metadata=True)
except Exception as e:
self.logger.error(f"Something went wrong while replying to tweet: {str(e)}")
return None
return reply
def get_tweet(self, tweet_id):
try:
tweet = self.api.get_status(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while fetching tweet by id: {str(e)}")
return None
return tweet
def favorites_tweet(self, tweet_id):
try:
fav_tweet = self.api.create_favorite(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while liking the tweet: {str(e)}")
return None
return fav_tweet
def destory_favorite_tweet(self,tweet_id):
try:
destroy_tweet = self.api.destroy_favorite(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while disliking the tweet: {str(e)}")
return None
return destroy_tweet
def re_tweet(self, tweet_id):
try:
retweet = self.api.retweet(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while retweeting: {str(e)}")
return None
return retweet
def un_retweet(self, tweet_id):
try:
retweet = self.api.unretweet(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while unretweeting the tweet: {str(e)}")
return None
return retweet
def get_user_posts(self, user_id, screen_name, page):
posts = []
try:
for page in tweepy.Cursor(self.api.user_timeline, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
posts = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching Posts: {str(e)}")
return None
return posts
def get_user_mentions_timeline(self, page):
mentions = []
try:
for page in tweepy.Cursor(self.api.mentions_timeline, tweet_mode="extended").pages(page):
mentions = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching Posts: {str(e)}")
return None
return mentions
def get_user_followers(self, user_id, screen_name, page):
followers = []
try:
for page in tweepy.Cursor(self.api.get_followers, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
followers = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Followers: {str(e)}")
return None
return followers
def get_user_following(self, user_id, screen_name, page):
following = []
try:
for page in tweepy.Cursor(self.api.get_friends, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
following = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Following: {str(e)}")
return None
return following
def get_user(self, user_id, screen_name):
try:
profile = self.api.get_user(
user_id=user_id, screen_name=screen_name)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Profile: {str(e)}")
return None
return profile
def follow_user(self, user_id):
try:
follow_tweet = self.api.create_friendship(
user_id=user_id, follow=True)
except Exception as e:
self.logger.error(
f"Something went wrong while following the user: {str(e)}")
return None
return follow_tweet
def unfollow_user(self, user_id):
try:
unfollow_tweet = self.api.destroy_friendship(
user_id=user_id, follow=True)
except Exception as e:
            self.logger.error(
                f"Something went wrong while unfollowing the user: {str(e)}")
return None
return unfollow_tweet
def get_followers(self, user_id):
try:
followers = self.api.get_followers(user_id=user_id)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user followers: {str(e)}")
return None
return followers
def get_my_tweets(self, user_id):
try:
tweets = self.api.user_timeline(tweet_mode="extended")
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user tweets: {str(e)}")
return None
return tweets
def get_my_200_tweets(self, user_id):
try:
tweets = self.api.user_timeline(tweet_mode="extended",count=200)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user tweets: {str(e)}")
return None
return tweets
def get_mentions(self, user_id):
try:
timeline = self.api.mentions_timeline(
user_id=user_id, include_rts=False)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user mentions timeline: {str(e)}")
return None
return timeline
def get_closest_trends(self,latitude,longitude):
try:
trends = self.api.closest_trends(latitude,longitude)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching closest trends: {str(e)}")
return None
return trends
def get_place_trends(self,woeId):
try:
trends = self.api.get_place_trends(woeId)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching place trends: {str(e)}")
return None
return trends
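# Hedged usage sketch (placeholder tokens; real credentials come from the
# app's OAuth flow):
# api = TwitterAPI("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
# print(api.get_user_profile(user_id=None, screen_name="jack"))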
|
Socialet/web-backend
|
app/services/api/twitterAPI.py
|
twitterAPI.py
|
py
| 9,191 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38823318960
|
import csv
def write_dev_stock(data):
    # `data` is expected to be a pandas DataFrame; write each row to output.csv.
    with open('output.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        for i in range(len(data.index)):
            writer.writerow(data.iloc[i])
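# Usage sketch (assumes pandas; the frame contents are made up):
# import pandas as pd
# write_dev_stock(pd.DataFrame({"code": [7203, 9984], "close": [2500.0, 6500.0]}))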
|
shun-chiba/nq52
|
python/src/file/write_csv.py
|
write_csv.py
|
py
| 250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24657079519
|
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from . import models
import json
# Register your models here.
class BaseAdmin(admin.ModelAdmin):
list_per_page = 50
list_max_show_all = 200
show_full_result_count = False
preserve_filters = True
@admin.register(models.Statistics)
class StatisticsAdmin(BaseAdmin):
list_display = (
'id', 'jsonGlobalStatistics', 'jsonDomesticStatistics',
'jsonInternationalStatistics', 'modifyTime', 'crawlTime'
)
search_fields = ('crawlTime', 'modifyTime')
def jsonGlobalStatistics(self, obj):
return self.to_json(obj.globalStatistics)
jsonGlobalStatistics.short_description = _('globalStatistics')
jsonGlobalStatistics.admin_order_field = 'globalStatistics'
def jsonDomesticStatistics(self, obj):
return self.to_json(obj.domesticStatistics)
jsonDomesticStatistics.short_description = _('domesticStatistics')
jsonDomesticStatistics.admin_order_field = 'domesticStatistics'
def jsonInternationalStatistics(self, obj):
return self.to_json(obj.internationalStatistics)
jsonInternationalStatistics.short_description \
= _('internationalStatistics')
jsonInternationalStatistics.admin_order_field \
= 'internationalStatistics'
def to_json(self, data):
        try:
            data = json.loads(data)
        except (TypeError, ValueError):
            return
result = []
for k, v in sorted(data.items()):
result.append(format_html('{}: {}', k, v))
return mark_safe(format_html(
'<pre>{}</pre>', format_html('<br>'.join(result))))
@admin.register(models.City)
class CityAdmin(BaseAdmin):
list_display = (
'countryCode', 'provinceName', 'provinceCode', 'cityName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = (
'cityName', 'countryCode', 'provinceCode', 'provinceName'
)
@admin.register(models.Province)
class ProvinceAdmin(BaseAdmin):
list_display = (
'countryCode', 'provinceName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = ('provinceName', 'countryCode')
@admin.register(models.Country)
class CountryAdmin(BaseAdmin):
list_display = (
'continents', 'countryCode', 'countryName', 'countryFullName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = (
'continents', 'countryFullName', 'countryCode', 'countryName'
)
@admin.register(models.CountryCode)
class CountryCodeAdmin(BaseAdmin):
list_display = (
'numericCode', 'countryCode', 'shortCountryCode', 'countryName',
'englishCountryName', 'englishCountryFullName', 'comment'
)
search_fields = (
'numericCode', 'countryCode', 'shortCountryCode', 'countryName',
'englishCountryName', 'englishCountryFullName', 'comment'
)
|
leafcoder/django-covid19
|
django_covid19/admin.py
|
admin.py
|
py
| 3,276 |
python
|
en
|
code
| 155 |
github-code
|
6
|
71781182589
|
import glob
import os
from datetime import datetime
import cv2
# isResize = False
isResize = True # change this to False, if resize is not required
images = []
image_src_folder = "images"
files = glob.glob(image_src_folder+"/*")
for file in files:
print(f"Processing: {os.path.basename(file)}")
if file.endswith(".jpg") or file.endswith(".png"):
image = cv2.imread(file)
print(f"original image dimension: {image.shape}") # image shape is displayed as height,width, channel
if isResize:
resize_scale = 0.45
image = cv2.resize(image, None, fx=resize_scale, fy=resize_scale, interpolation=cv2.INTER_AREA) # resize by scale
# image = cv2.resize(image, (640,480), interpolation=cv2.INTER_AREA) # resize by fixed output dimension
print(f"new image dimension: {image.shape}")
images.append(image)
# if the stitch doesn't come out well, try the STITCHER_SCANS mode
stitcher = cv2.Stitcher.create(mode=cv2.STITCHER_PANORAMA)
# stitcher = cv2.Stitcher.create(mode=cv2.STITCHER_SCANS)
ret, stitched = stitcher.stitch(images)
if ret == cv2.STITCHER_OK:
output_fn = f'{datetime.now().strftime("%Y%m%d_%H%M%S")}.png'
cv2.imshow('Panorama', stitched)
cv2.imwrite(output_fn, stitched)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
print("Error during Stitching")
|
yptheangel/opencv-starter-pack
|
python/examples/image_stitching/stitching.py
|
stitching.py
|
py
| 1,349 |
python
|
en
|
code
| 8 |
github-code
|
6
|
24047121946
|
import random
from tekleo_common_message_protocol import OdSample
from tekleo_common_utils import UtilsImage, UtilsOpencv
from tekleo_common_utils_ai.dataset_modification.abstract_dataset_modifier import AbstractDatasetModifier
from injectable import injectable, autowired, Autowired
@injectable
class DatasetModifierSaturation(AbstractDatasetModifier):
@autowired
def __init__(
self, min_saturation_ratio: float, max_saturation_ratio: float, saturation_application: str, random_seed: int,
utils_image: Autowired(UtilsImage), utils_opencv: Autowired(UtilsOpencv),
):
self.utils_image = utils_image
self.utils_opencv = utils_opencv
self.min_saturation_ratio = min_saturation_ratio
self.max_saturation_ratio = max_saturation_ratio
self.saturation_application = saturation_application
self.random_seed = random_seed
# Instance of random generator
self.random = random.Random()
self.random.seed(self.random_seed)
def apply(self, sample: OdSample) -> OdSample:
# Convert image to opencv
image_pil = sample.image
image_cv = self.utils_image.convert_image_pil_to_image_cv(image_pil)
        # Determine the saturation ratio for this sample
saturation_ratio = self.random.uniform(self.min_saturation_ratio, self.max_saturation_ratio)
# Determine the sign
saturation_sign = 1
if self.saturation_application == 'decrease':
saturation_sign = -1
elif self.saturation_application == 'increase':
saturation_sign = 1
elif self.saturation_application == 'both':
should_increase = self.random.choice([True, False])
if should_increase:
saturation_sign = 1
else:
                saturation_sign = -1
        # Convert the ratio into a multiplicative coefficient around 1
saturation_delta = saturation_ratio
saturation_value = 1 + saturation_sign * saturation_delta
# print("saturation_value=" + str(saturation_value))
# Apply saturation to the image
image_cv = self.utils_opencv.saturation(image_cv, saturation_coefficient=saturation_value)
# Convert back to pil
image_pil = self.utils_image.convert_image_cv_to_image_pil(image_cv)
# Generate new name
new_name = sample.name
if "_mod_" in new_name:
new_name = new_name + "_saturation_" + self.saturation_application[0:4]
else:
new_name = sample.name + "_mod_saturation_" + self.saturation_application[0:4]
# Return new sample
return OdSample(
new_name,
image_pil,
sample.items
)
|
JPLeoRX/tekleo-common-utils-ai
|
tekleo_common_utils_ai/dataset_modification/dataset_modifier_saturation.py
|
dataset_modifier_saturation.py
|
py
| 2,676 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1698064062
|
import pygame
# define constants for the window size and stack element size
ELEMENT_WIDTH = 50
ELEMENT_HEIGHT = 200
# initialize Pygame
pygame.init()
# create a window
window = pygame.display.set_mode((1000, 500))
# define a font to use for displaying the stack elements
font = pygame.font.Font(None, 36)
# define a stack to hold the operands
stack = []
# define a list of tokens (operators and operands) in RPN
tokens = ["2", "3", "5", "*", "8", "+", "4", "2", "/", "-"]
# iterate through the tokens
for token in tokens:
# if the token is an operand, push it onto the stack and draw it on the window
if token.isdigit():
stack.append(int(token))
text = font.render(token, True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
# if the token is an operator, pop the top two operands from the stack,
# perform the operation, and push the result back onto the stack
else:
right = stack.pop()
left = stack.pop()
if token == "+":
result = left + right
elif token == "-":
result = left - right
elif token == "*":
result = left * right
elif token == "/":
result = left / right
stack.append(result)
text = font.render(str(result), True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
# update the window to show the new stack element
pygame.display.update()
# pause for a moment to allow the user to see the update
pygame.time.delay(500)
# the final result will be the top item on the stack
result = stack.pop()
text = font.render(str(result), True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
pygame.display.update()
# run the Pygame loop until the user closes the window
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# quit Pygame
pygame.quit()
|
Dhivyno/Programming-projects
|
All files/test.py
|
test.py
|
py
| 2,005 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26922909604
|
"""
Imports the various compute backends
"""
from typing import Set
from ..exceptions import InputError, ResourceError
from .cfour import CFOURHarness
from .dftd3 import DFTD3Harness
from .entos import EntosHarness
from .gamess import GAMESSHarness
from .molpro import MolproHarness
from .mopac import MopacHarness
from .mp2d import MP2DHarness
from .nwchem import NWChemHarness
from .psi4 import Psi4Harness
from .rdkit import RDKitHarness
from .terachem import TeraChemHarness
from .torchani import TorchANIHarness
__all__ = ["register_program", "get_program", "list_all_programs", "list_available_programs"]
programs = {}
def register_program(entry_point: 'ProgramHarness') -> None:
"""
Register a new ProgramHarness with QCEngine.
"""
name = entry_point.name
if name.lower() in programs.keys():
raise ValueError('{} is already a registered program.'.format(name))
programs[name.lower()] = entry_point
def unregister_program(name: str) -> None:
"""
Unregisters a given program.
"""
ret = programs.pop(name.lower(), None)
if ret is None:
raise KeyError(f"Program {name} is not registered with QCEngine")
def get_program(name: str, check: bool = True) -> 'ProgramHarness':
"""
Returns a program's executor class
Parameters
----------
    check
        If ``True``, raise an error when the program is not found. ``False`` is handy
        for the specialized case of calling non-execution methods (like parsing for
        testing) on the returned ``Harness``.
"""
name = name.lower()
if name not in programs:
raise InputError(f"Program {name} is not registered to QCEngine.")
ret = programs[name]
if check and not ret.found():
raise ResourceError(f"Program {name} is registered with QCEngine, but cannot be found.")
return ret
def list_all_programs() -> Set[str]:
"""
List all programs registered by QCEngine.
"""
return set(programs.keys())
def list_available_programs() -> Set[str]:
"""
    List all programs that can be executed (found) by QCEngine.
"""
ret = set()
for k, p in programs.items():
if p.found():
ret.add(k)
return ret
register_program(Psi4Harness())
register_program(RDKitHarness())
register_program(TorchANIHarness())
register_program(MolproHarness())
register_program(MopacHarness())
register_program(DFTD3Harness())
register_program(TeraChemHarness())
register_program(MP2DHarness())
register_program(GAMESSHarness())
register_program(NWChemHarness())
register_program(CFOURHarness())
register_program(EntosHarness())
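# Usage sketch (which harnesses are actually available depends on the local
# environment):
# print(list_available_programs())
# harness = get_program("psi4", check=False)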
|
ChemRacer/QCEngine
|
qcengine/programs/base.py
|
base.py
|
py
| 2,624 |
python
|
en
|
code
| null |
github-code
|
6
|
70647215549
|
# Procedure RECURSIVE-BINARY-SEARCH(A, p, r, x)
# 1. If p > r, return NOT-FOUND
# 2. Otherwise (p <= r) do the following:
#    A. Set q to floor((p + r) / 2)
#    B. If A[q] = x, return q
#    C. Otherwise (A[q] != x), if A[q] > x, return
#       RECURSIVE-BINARY-SEARCH(A, p, q - 1, x)
#    D. Otherwise (A[q] < x), return
#       RECURSIVE-BINARY-SEARCH(A, q + 1, r, x)
lista = ['a', 'a', 'b', 'c', 'c', 'd', 'e', 'f', 'f', 'f', 'g', 'h', 'i', 'j']
def rekurencyjne_wyszukiwanie_binarne(A, p, r, x):
if p > r:
return 'NIE-ZNALEZIONO'
else:
# A
q = int((p + r) / 2)
# B
if A[q] == x:
return q
else:
# C
if A[q] > x:
return rekurencyjne_wyszukiwanie_binarne(A, p, q - 1, x)
# D
else:
return rekurencyjne_wyszukiwanie_binarne(A, q + 1, r, x)
# call the function (r is the last valid index, hence len(lista) - 1)
print(rekurencyjne_wyszukiwanie_binarne(lista, 0, len(lista) - 1, 'g'))
# -----------------------------------------------------------------------------
# Procedure BINARY-SEARCH(A, n, x)
# 1. Set p to 1 and r to n
# 2. While p <= r, do the following:
#    A. Set q to floor((p + r) / 2)
#    B. If A[q] = x, return q
#    C. Otherwise (A[q] != x), if A[q] > x, set r to q - 1
#    D. Otherwise (A[q] < x), set p to q + 1
# 3. Return NOT-FOUND
def wyszukiwanie_binarne(A, n, x):
p = 0
r = n
kroki = 0
while p <= r:
kroki += 1
q = int((p + r) / 2)
if A[q] == x:
return str(q) + ' kroki: ' + str(kroki)
else:
if A[q] > x:
r = q - 1
else:
p = q + 1
return 'NIE-ZNALEZIONO'
# call the function
print(wyszukiwanie_binarne(lista, len(lista) - 1, 'i'))  # should return 12 for 'i'
# Procedure SELECTION-SORT(A, n)
# Inputs:
#  - A: an array
#  - n: the number of elements in A to sort.
# Result: the elements of A are sorted in nondecreasing order (i.e. ascending;
# an odd way to name it...)
# 1. For i = 1 to n - 1:
#    A. Set smallest to the index of the smallest element in the subarray A[i..n]
#    B. Swap A[i] with A[smallest]
#
# The procedure again: SELECTION-SORT(A, n)
# Inputs: as above
# 1. For i = 1 to n - 1:
#    A. Set smallest to i
#    B. For j = i + 1 to n:
#       i. If A[j] < A[smallest], set smallest to j
#    C. Swap A[i] with A[smallest]
#
# Procedure SELECTION-SORT(A, n) - in my own words
# 1. Compare the second element with the first; if it is smaller, swap them
# 2. Compare the third with the first
# 3. ...
# ... the first position now definitely holds the smallest element, so skip it
# 4. Compare the third with the second
# 5. and so on up to n - 1
# Selection sort
def swap(L, i, k):
temp = L[i]
L[i] = L[k]
L[k] = temp
return L
def select_sort(L):
left = 0
right = len(L) - 1
for i in range(left, right):
k = i
for j in range(i + 1, right + 1):
if L[j] < L[k]:
k = j
swap(L, i, k)
return L
# Procedure INSERTION-SORT(A, n)
# 1. For i = 2 to n:
#    A. Set key to A[i] and set j to i - 1
#    B. While j > 0 and A[j] > key, do the following:
#       i. Assign A[j] to A[j + 1]
#       ii. Decrease j by 1 (i.e. set j to j - 1)
#    C. Assign key to A[j + 1]
# Insertion sort
def insertion_sort(A):
for i in range(1, len(A)):
klucz = A[i]
j = i - 1
while j >= 0 and A[j] > klucz:
A[j + 1] = A[j]
j = j - 1
A[j + 1] = klucz
return A
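# Quick sanity checks (made-up inputs, not from the original file):
assert select_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert insertion_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]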
# Procedure MERGE-SORT(A, p, r) - THIS DIDN'T WORK OUT FOR ME; REVISIT IT
# ! NOTE ! - this procedure needs a working merge() to do its job
#
# Inputs:
#  A: an array
#  p, r: starting and ending indices of a subarray of A
# Result: the elements of the subarray A[p..r] sorted in nondecreasing order.
# 1. If p >= r, the subarray A[p..r] has at most 1 element and is therefore
#    already sorted. Return without doing anything.
# 2. Otherwise, do the following:
#    A. Set q to int((p + r) / 2)
#    B. Recursively call MERGE-SORT(A, p, q)
#    C. Recursively call MERGE-SORT(A, q + 1, r)
#    D. Call MERGE(A, p, q, r)
#
# Procedure MERGE(A, p, q, r)
# Inputs:
#  A: an array
#  p, q, r: indices into A. Each of the subarrays A[p..q] and A[q+1..r] is
#  assumed to be already sorted
# Result: the subarray A[p..r] contains the elements originally in A[p..q] and
# A[q+1..r], but now the entire subarray A[p..r] is sorted.
# 1. Set n1 to q - p + 1 and n2 to r - q
# 2. Let B[1..n1+1] and C[1..n2+1] be new arrays
# 3. Copy A[p..q] into B[1..n1] and A[q+1..r] into C[1..n2]
# 4. Set both B[n1+1] and C[n2+1] to infinity
# 5. Set both i and j to 1
# 6. For k = p to r:
#    A. If B[i] <= C[j], assign B[i] to A[k] and increase i by 1
#    B. Otherwise (B[i] > C[j]), assign C[j] to A[k] and increase j by 1
# an attempt at implementing the merge procedure following the book's instructions
def scalaj_z_ksiazki(A):
    p = 0
    r = len(A) - 1
    q = (p + r) // 2
    # copy the two sorted halves, each ending with an infinity sentinel (step 4)
    B = A[p:q + 1] + [float('inf')]
    C = A[q + 1:r + 1] + [float('inf')]
    i = 0
    j = 0
    for k in range(p, r + 1):
        if B[i] <= C[j]:
            A[k] = B[i]
            i += 1
        else:
            A[k] = C[j]
            j += 1
    return A
# my own implementation of the merge function after thinking it through with my own brain
# takes 2 arrays as input, both sorted in ascending order
# outputs a single merged array sorted in ascending order
def scalaj(A, B):
C = []
for e in A + B:
if not B:
C.append(A[0])
A.remove(A[0])
elif not A:
C.append(B[0])
B.remove(B[0])
elif A[0] <= B[0]:
C.append(A[0])
A.remove(A[0])
elif A[0] >= B[0]:
C.append(B[0])
B.remove(B[0])
return C
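# With a working merge, the book's recursive MERGE-SORT falls out naturally.
# A minimal sketch (note: my scalaj() empties the lists it is given, so the
# halves are passed in as fresh slices); this sketch is an addition, not the
# book's own code:
def sortowanie_przez_scalanie(A):
    if len(A) <= 1:
        # a subarray of at most 1 element is already sorted
        return A
    q = len(A) // 2
    lewa = sortowanie_przez_scalanie(A[:q])
    prawa = sortowanie_przez_scalanie(A[q:])
    return scalaj(lewa, prawa)
# e.g. sortowanie_przez_scalanie([12, 9, 3, 7, 14, 11]) -> [3, 7, 9, 11, 12, 14]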
"""
Procedura NAPRAWDE_PROSTE_SORTOWANIE(A, n)
Dane wejściowe:
- A: tabklica, której elementami są tylko 1 lub 2
- n: liczba elementów w A do posortowania
Wynik: elementy A są posortowane w porządku niemalejącym.
1. Nadaj k wartość 0
2. Dla i = 1 do n:
A. Jeśli A[i] = 1, to zwiększ k o 1.
3. Dla i = 1 do k:
A. Podstaw 1 do A[i]
4. Dla i = k + 1 do n:
A. Podstaw 2 do A[i]
"""
def naprawde_proste_sortowanie(A):
    # INPUT
    n = len(A)
    # CALCULATIONS
    k = 0
    for i in range(n):
        if A[i] == 1:
            k += 1
    for i in range(k):
        A[i] = 1
    for i in range(k, n):
        A[i] = 2
    # RESULT
    return A
"""
Procedura OBLICZ-TABELE-LCS(X, Y)
Dane wejściowe: X i Y - dwa napisy długości, odpowiednio m i n
Wynik: tablica l[0..m, 0..n]. Wartość l[m, n] jest długością najdłuższego
wspólnego podciągu X i Y
1. Niech l[0..m, 0..n] będzie nową tablicą
2. Dla i = 0 do m:
A. Nadaj l[i, 0] wartość 0
3. Dla j = 0 do n:
A. Nadaj l[0, j] wartość 0
4. Dla i = 1 do m:
A. Dla j = 1 do n
i. Jeśli xi jest takie samo jak yi, to nadaj l[i, j] wartość l[i-1, j-1] + 1
ii. W przeciwnym razie (xi różni się od yi) nadaj l[i, j] wartość większą
z l[i, j-1] i l[i-1, j]. Jeśli l[i, j-1] równa się l[i-1, j], to nie ma
znaczenia, którą wybierzesz
5. Zwróć tablicę l
"""
def oblicz_tabele_lcs(X, Y):
    # additional input
    m = len(X)
    n = len(Y)
    # steps 1, 2, 3
    l = []
    for e in range(m + 1):
        l += [[0 for i in range(n + 1)]]
    # step 4
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                l[i][j] = l[i - 1][j - 1] + 1
            else:
                if l[i][j - 1] > l[i - 1][j]:
                    l[i][j] = l[i][j - 1]
                else:
                    l[i][j] = l[i - 1][j]
    # RESULT
    return l
"""
Procedura ZESTAWIAJ-LCS(X, Y, l, i, j)
Dane wejściowe:
X i Y: dwa napisy
l: tablica wypełniona przez procedurę oblicz-tabelę-lcs
i oraz j: indeksy do, odpowiednio, X i Y, a także do l
Wynik: LCS (najdłuższy wspólny podciąg) Xi i Yi
1. Jeśli l[i, j] równa się 0, zwróć napis pusty
2. W przeciwnym razie (ponieważ l[i, j] jest dodatnie, więc również i oraz j są
dodatnie), jeżeli xi jest taki sam jak yj, zwróć napis utworzony najpierw przez
rekurencyjne wywołanie ZESTAWIAJ-LCS(X, Y, l, i-1, j-1), a następnie dodanie
na jego końcu xi (lub yj)
3. W przeciwnym razie (xi różni się od yj), jeśli l[i, j-1] jest większe niż
l[i-1, j], zwróć napis przekazany przez rekurencyjne wywołanie
ZESTAWIAJ-LCS(X, Y, l, i, j-1)
4. W przeciwnym razie (xi różni się od yj i l[i, j-1] jest mniejsze lub równe
l[i-1, j], zwróć napis przekazany przez rekurencyjne wywołanie
ZESTAWIAJ-LCS(X, Y, l, i-1, j))
"""
def zestawiaj_lcs(X, Y, l, i, j):
    # i and j are 0-based string indices, so cell l[i + 1][j + 1] of the
    # (m+1) x (n+1) table corresponds to the prefixes X[0..i] and Y[0..j];
    # every lookup into l is therefore shifted by +1
    if l[i + 1][j + 1] == 0:
        return ''
    else:
        if X[i] == Y[j]:
            return zestawiaj_lcs(X, Y, l, i - 1, j - 1) + X[i]
        else:
            if l[i + 1][j] > l[i][j + 1]:
                return zestawiaj_lcs(X, Y, l, i, j - 1)
            else:
                return zestawiaj_lcs(X, Y, l, i - 1, j)
def szyfruj_cezarze(text):
    """
    1. Assign the text to transform to the variable text
    2. n = length of the text
    3. Build an array of all characters
    4. m = length of all the characters
    5. For each character in text:
        a. find that character's index in text
        b. find that character's index in all_chars
        c. replace that character of text with the one 3 positions ahead
    """
    # input
    n = len(text)
    all_chars = 'abcdefghijklmnouprstwvqxyz abcd '
    exit_text = ''
    # calculations
    for e in text:
        i = all_chars.index(e)
        exit_text += all_chars[i + 3]
    # output
    return exit_text
def odszyfruj_cezarze(text):
    """
    the encryption method's calculation, reversed
    """
    # input
    n = len(text)
    all_chars = 'abcdefghijklmnouprstwvqxyz abcd '
    exit_text = ''
    # calculations
    for e in text:
        i = all_chars.index(e)
        exit_text += all_chars[i - 3]
    # output
    return exit_text
def kompresuj(text):
    """
    Input:
     - text: the text to compress
    Output:
     - text_tab: an array of the unique words used in the text
     - text_code: an array with the text laid out as indices into text_tab
    Calculations:
    1. Take the input text
    2. Make empty arrays: text_tab and text_code
    3. Make an array raw_text out of the text - each word separately
    4. For each element of the raw_text array:
        a. If the word is already in text_tab:
            - Append its index from text_tab to text_code
        b. If the word is not in text_tab:
            - Append the element to text_tab
            - Append its index from text_tab to text_code
    5. Make a two-dimensional array code and put text_tab and text_code into it
    6. Return the array
    """
    # Input
    text_tab = []
    text_code = []
    raw_text = text.split(' ')
    # Calculations
    for word in raw_text:
        # If the word is in text_tab
        if word in text_tab:
            text_code.append(text_tab.index(word))
        # If it isn't
        else:
            text_tab.append(word)
            text_code.append(text_tab.index(word))
    # Output
    code = [text_tab, text_code]
    return code
def dekompresuj(code):
    """
    Input:
     - a two-dimensional array with:
        1. the words - text_tab
        2. the order of the words - text_code
    Calculations:
    1. From the two-dimensional array make 2 one-dimensional arrays:
        - text_tab
        - code_tab
    2. Make an empty string
    3. For each code from code_tab:
        a. append to the string the text from the text_tab array at that
           code's index
        b. append a space to the string
    4. Return the string
    Output:
     - the text assembled from the words of text_tab in the order given by text_code
    """
    # Input
    text_tab = code[0]
    code_tab = code[1]
    string = ''
    # Calculations
    for c in code_tab:
        string += text_tab[c]
        string += ' '
    # Output
    return string
# select_sort
lista = [12, 9, 3, 7, 14, 11]
print(select_sort(lista))
# insertion_sort
lista = [12, 9, 3, 7, 14, 11]
print(insertion_sort(lista))
# merging (my version)
listaA = [2, 3, 9, 11, 12, 13, 19, 43, 129, 300]
listaB = [1, 3, 7, 8, 10, 20, 56]
print(scalaj(listaA, listaB))
# really_simple_sort
A = [1, 2, 1, 1, 1, 2]
print(naprawde_proste_sortowanie(A))
# longest common subsequence of two strings
X = 'CATCGA'
Y = 'GTACCGTCA'
# compute and print the result
l = oblicz_tabele_lcs(X, Y)
for e in l:
print(e)
i = len(X) - 1
j = len(Y) - 1
# print zestawiaj_lcs
print(zestawiaj_lcs(X, Y, l, i, j))
text = 'hej jestem maciej zeta a ty'
print('text before: ' + text)
text = szyfruj_cezarze(text)
print('encrypted: ' + text)
text = odszyfruj_cezarze(text)
print('decrypted: ' + text)
# compression tests
text = '''
A black hole is a region of spacetime exhibiting gravitational acceleration
so strong that nothing no particles or even electromagnetic radiation such as
light can escape from it. The theory of general relativity predicts that a
sufficiently compact mass can deform spacetime to form a black hole.
The boundary of the region from which no escape is possible is called the
event horizon. Although the event horizon has an enormous effect on the fate
and circumstances of an object crossing it, no locally detectable features
appear to be observed. In many ways, a black hole acts like an ideal black
body, as it reflects no light. Moreover, quantum field theory in
curved spacetime predicts that event horizons emit Hawking radiation, with
the same spectrum as a black body of a temperature inversely proportional to
its mass. This temperature is on the order of billionths of a kelvin for
black holes of stellar mass, making it essentially impossible to observe.
Objects whose gravitational fields are too strong for light to escape were
first considered in the 18th century by John Michell and Pierre-Simon
Laplace. The first modern solution of general relativity that would
characterize a black hole was found by Karl Schwarzschild in 1916, although
its interpretation as a region of space from which nothing can escape was
first published by David Finkelstein in 1958. Black holes were long
considered a mathematical curiosity; it was during the 1960s that theoretical
work showed they were a generic prediction of general relativity. The
discovery of neutron stars by Jocelyn Bell Burnell in 1967 sparked interest
in gravitationally collapsed compact objects as a possible astrophysical
reality.
'''
print('BEFORE COMPRESSION: ' + text)
compressed = kompresuj(text)
print(' AFTER COMPRESSION: ' + str(compressed))
decompressed = dekompresuj(compressed)
print(' AFTER DECOMPRESSION: ' + str(decompressed))
print(' original text length: ' + str(len(text)))
print('compressed word table + code of the compressed text: ' + str(len(compressed[0]) + len(compressed[1])))
|
koualsky/dev-learning
|
algorithms/algorithms.py
|
algorithms.py
|
py
| 16,297 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
4955963415
|
'''
Problem statement: Given an integer n, return true if n has exactly three positive divisors. Otherwise, return false.
An integer m is a divisor of n if there exists an integer k such that n = k * m.
'''
# Solution:
class Solution:
    def isThree(self, n: int) -> bool:
        count = 0
        for x in range(1, n + 1):
            if n % x == 0:
                count += 1
        return count == 3
# Time complexity: O(n)
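# An optional refinement (a sketch, not part of the submitted solution): n has
# exactly three positive divisors exactly when n is the square of a prime p
# (the divisors are then 1, p and p*p), which gives an O(sqrt(n)) check:
import math

def is_three_fast(n: int) -> bool:
    root = math.isqrt(n)
    if root * root != n:   # n must be a perfect square
        return False
    if root < 2:           # 1 is not prime
        return False
    for d in range(2, math.isqrt(root) + 1):
        if root % d == 0:  # the square root must be prime
            return False
    return True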
|
nidhisha-shetty/LeetCode
|
three-divisors.py
|
three-divisors.py
|
py
| 400 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39830489884
|
from collections import deque
def solution(n, info):
answer = []
diff = 0
queue = deque()
queue.append((0, [0,0,0,0,0,0,0,0,0,0,0]))
while queue:
idx, arr = queue.popleft()
        # when all arrows have been shot
        if sum(arr) == n:
            # tally Apeach's and Lion's scores
            apeach, lion = 0, 0
            for i in range(11):
                if info[i] == arr[i] == 0:
                    continue
                if info[i] >= arr[i]:
                    apeach += 10 - i
                else:
                    lion += 10 - i
            # if Lion's score is higher than Apeach's
            if apeach < lion:
                curr_diff = lion - apeach
                # if the score gap is smaller than the best so far
                if diff > curr_diff:
                    continue
                # if the score gap is larger than before
                if diff < curr_diff:
                    # update the best gap
                    diff = curr_diff
                    # reset the result list
                    answer.clear()
                # append to the result list
                answer.append(arr)
        # when more arrows were shot than allowed
        elif sum(arr) > n:
            continue
        # when the current index is the 0-point target
        elif idx == 10:
            temp = arr.copy()
            # shoot all remaining arrows at the 0-point target
            temp[idx] = n - sum(arr)
            queue.append((-1, temp))
        else:
            # hit this target more times than Apeach
            temp = arr.copy()
            temp[idx] = info[idx] + 1
            queue.append((idx + 1, temp))
            # do not hit this target at all
            temp2 = arr.copy()
            temp2[idx] = 0
            queue.append((idx + 1, temp2))
return answer[-1] if answer else [-1]
print(solution(5, [2,1,1,1,0,0,0,0,0,0,0]))
print(solution(1, [1,0,0,0,0,0,0,0,0,0,0]))
print(solution(9, [0,0,1,2,0,1,1,1,1,1,1]))
print(solution(10, [0,0,0,0,0,0,0,0,3,4,3]))
|
omg7152/CodingTestPractice
|
kakaoBlindRecruitment2022/Q4.py
|
Q4.py
|
py
| 2,081 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
73539848828
|
def conta_letras(frase, contar='vogais'):
tam = len(frase)
if contar == 'vogais':
vogais = ['a', 'e', 'i', 'o', 'u']
n_vogais = 0
for i in range(tam):
if frase[i].lower() in vogais:
n_vogais += 1
return n_vogais
if contar == 'consoantes':
        consoantes = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n',
                      'p', 'q', 'r', 's', 't', 'v', 'x', 'y', 'w', 'z']
n_consoantes = 0
for i in range(tam):
if frase[i].lower() in consoantes:
n_consoantes += 1
return n_consoantes
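# A quick usage sketch (not part of the original exercise file):
if __name__ == '__main__':
    frase = 'Ciencia da Computacao'
    print(conta_letras(frase))                 # vowels (default) -> 10
    print(conta_letras(frase, 'consoantes'))   # consonants -> 9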
|
Icarolmo/Introducao-a-Ciencia-da-Computacao-com-Python-parte-2-IME-USP
|
Exercícios/contaVogais.py
|
contaVogais.py
|
py
| 633 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
39122258660
|
from service_info import dp, bot, greeting, download_url, path_to_file_url, PhotoStatesGroup, dict_config, lang_dict
import aiohttp
from visor import tess_visor, easy_visor, keras_visor
from aiogram import types, executor
from keyboards import type_kb, lang_kb
from aiogram.dispatcher import FSMContext
import logging
import shutil
async def on_startup(_):
logging.info('bot was started')
@dp.message_handler(commands = ['start'])
async def command_start(message: types.Message):
logging.info('User sent a command /start')
await bot.send_message(chat_id=message.chat.id, text=greeting)
await bot.send_sticker(chat_id=message.chat.id, sticker='CAACAgIAAxkBAAEJO-Jkf5GZ-dCO4T3wGzzFjksgFB_JgwACYAIAAgvNDgNERok1XlXTOS8E')
await PhotoStatesGroup.photo.set()
@dp.message_handler(content_types=['photo'], state=PhotoStatesGroup.photo)
async def photo_handler(message: types.Message, state: FSMContext):
logging.info('State: Photo ')
logging.info('The bot received the photo ')
picture = bytearray()
logging.info('Aiohttp request is processing')
async with aiohttp.ClientSession() as session:
resp = await session.get(download_url+message.photo[2].file_id)
resp = await resp.json(encoding='UTF-8')
async with session.get(path_to_file_url+resp['result']['file_path']) as responce:
async for chunk in responce.content.iter_chunked(64*1024):
picture += chunk
logging.info('Photo has been downloaded from Telegram Server')
async with state.proxy() as data:
data['photo'] = picture
logging.info('Photo saved in MemoryStorage')
await message.answer('Какой тип больше подходит данному тексту?', reply_markup=type_kb)
await PhotoStatesGroup.next()
@dp.message_handler(state=PhotoStatesGroup.type_photo)
async def type_picture(message: types.Message, state: FSMContext):
    logging.info('State: Mode for OCR')
    logging.info('Mode for OCR was received')
if message.text in list(dict_config.keys()):
async with state.proxy() as data:
logging.info('Mode saved in MemoryStorage')
data['type'] = message.text
else:
        await message.answer('Некорректный ответ, выбрал стандартный режим')
        logging.warning('A standard mode was set. User sent an unrecognized mode')
async with state.proxy() as data:
data['type'] = 'Однородный блок текста на картинке'
logging.info('Mode saved in MemoryStorage')
await message.answer('Какой язык на картинке?', reply_markup=lang_kb)
await PhotoStatesGroup.lang.set()
@dp.message_handler(state=PhotoStatesGroup.lang)
async def set_language(message: types.Message, state: FSMContext):
    logging.info('State: Language')
    logging.info('Language was received')
if message.text in list(lang_dict.keys()):
async with state.proxy() as data:
data['lang'] = message.text
else:
await message.answer('Некорректный ответ. Выбран английский язык по умолчанию')
async with state.proxy() as data:
data['lang'] = 'Английский'
        logging.info('Incorrect language. A default language was set')
logging.info('Language was saved in MemoryStorage')
await message.answer('Обработка... Это может занять минуту.')
logging.info("Was start a function 'Visor'. Data was sent to processing")
text = tess_visor(image=data['photo'], type_picture=data['type'], language=data['lang'])
await message.answer(f" {text[0]}\n Pytessart\n Time: {text[1]}")
text = easy_visor(image=data['photo'], type_picture=data['type'], language=data['lang'])
await message.answer(f" {text[0]}\n EasyOCR\n Time: {text[1]}")
text = keras_visor()
await message.answer(f" {text[0]}\n KerasOCR\n Time: {text[1]}")
shutil.rmtree('images')
await message.answer('Ожидаю следующую картинку!')
await PhotoStatesGroup.photo.set()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, filename="bot.log", filemode="w", format="%(asctime)s %(levelname)s %(message)s")
executor.start_polling(dispatcher=dp, skip_updates = True, on_startup=on_startup)
|
dfgion/VisorBot
|
bot.py
|
bot.py
|
py
| 4,388 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7506825740
|
# Exercise 1: Create a non-recursive algorithm to count how many even numbers there are in a numeric sequence (1 to n).
def pairs(n):
count = 0
while n > 0:
if n % 2 == 0:
count += 1
n -= 1
return count
print(pairs(4))
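# Side note beyond the exercise statement (which asks for a loop): the count
# of even numbers in 1..n has the closed form n // 2, so the same answer is
# available in constant time.
def pairs_closed_form(n):
    return n // 2

assert pairs_closed_form(4) == pairs(4) == 2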
|
Gonzagadavid/trybe-exercises-computer-science
|
exercises/recursividade_35.2/exercise-1.py
|
exercise-1.py
|
py
| 255 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
14564990421
|
import numpy as np
from IO.Pinocchio.ReadPinocchio import mf
import matplotlib.pyplot as plt
# Box Size
boxsize = 150.0
aux = mf("../TestRuns/pinocchio.1.9775.example.mf.out", "mf")
mf0 = mf("../TestRuns/pinocchio.1.9775.example.catalog.out", "catalog", 64, boxsize)
mf0.dndm_teo = np.interp(mf0.m, aux.m, aux.dndm_teo)
mf0.dndlnm_teo = np.interp(mf0.m, aux.m, aux.dndlnm_teo)
plt.loglog(mf0.m, mf0.dndlnm, label='dn/dlnm Pin.')
plt.loglog(mf0.m, mf0.dndlnm_teo, label='dn/dlnm Teo.')
plt.legend()
plt.show()
plt.plot(mf0.m, mf0.dndlnm/mf0.dndlnm_teo, label='dn/dlnm Pin.')
plt.axhline(1.0)
plt.ylim([0.5, 1.05])
plt.xscale('log')
plt.legend()
plt.show()
|
TiagoBsCastro/PITILESS-SLICER
|
Test/test_mf.py
|
test_mf.py
|
py
| 657 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24274585662
|
# -----------------------------------------------------------
# Creates the views for the database.
# These views are called when the user navigates to a certain url.
# They are responsible for rendering an HTML template or returning the API data that were requested
# For example: Navigating to the url 'api/operations/' will trigger the OperationListCreateAPIView class
# Reference: https://docs.djangoproject.com/en/4.0/topics/class-based-views/
# -----------------------------------------------------------
import csv
import decimal
import json
import logging
import os
import shutil
import sys
import threading
import time
import zipfile
from datetime import datetime
from decimal import Decimal
import numpy as np
import open3d as o3d
import pandas as pd
import pytz
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.gis.geos import point
from django.core import serializers as core_serializers
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage
from django.db.models import Avg
from django.forms.models import model_to_dict
from django.http import (FileResponse, Http404, HttpResponse,
HttpResponseNotFound, HttpResponseRedirect,
JsonResponse)
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.urls import resolve, reverse, reverse_lazy
from django.views import View, generic
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from formtools.wizard.views import SessionWizardView
from guardian.shortcuts import assign_perm
from json2html import *
from logic import utils
from logic.algorithms.ballistic import ballistic
from logic.algorithms.build_map import (build_map_request_handler,
img_georeference)
from logic.algorithms.flying_report import flying_report
from logic.algorithms.lidar_point_cloud import lidar_points
from logic.algorithms.mission import mission_request_handler
from logic.algorithms.range_detection import range_detection
from logic.algorithms.water_collector import water_collector
from logic.algorithms.weather_station import (weather_station_ros_publisher,
weather_station_ros_subscriber)
from logic.Constants import Constants
from PIL import Image
from rest_framework import generics, permissions, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from .factories import *
from .forms import FlyingReportForm, JoinOperationForm, NewUserForm
from .models import Operation
from .permissions import IsOwnerOrReadOnly
from .serializers import *
logger = logging.getLogger(__name__)
# Function for creating Thread instances with stop function and timer function
class MyThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(MyThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
self._time = 0
def stop(self):
self._stop.set()
def stopped(self):
        return self._stop.is_set()
def time(self, seconds):
self._time = seconds
def get_time(self):
return self._time
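# A minimal usage sketch for MyThread (illustrative only; nothing below in
# this module calls it this way): the target polls stopped() so the thread
# can be shut down cooperatively.
#
#   def worker():
#       while not t.stopped():
#           time.sleep(1)  # periodic work goes here
#
#   t = MyThread(target=worker)
#   t.start()
#   ...
#   t.stop()   # signal the loop to exit
#   t.join()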
class DatabaseFiller(APIView):
'''
A class that populates the database with dummy data.
It utilizes the Factory notion, using the Factory Boy library
Reference: https://factoryboy.readthedocs.io/en/stable/orms.html
'''
def get(self, request):
UserFactory.create_batch(20)
UserLogFactory.create_batch(20)
OperationFactory.create_batch(20)
mission_points = MissionPointFactory.create_batch(10)
MissionFactory.create_batch(20, mission_points=tuple(mission_points))
mission = Mission.objects.all().first()
drones = DroneFactory.create_batch(20)
DroneToOperationLogFactory.create_batch(20)
WeatherStationFactory.create_batch(50)
TelemetryFactory.create_batch(50)
LiveStreamSessionFactory.create_batch(20)
RawFrameFactory.create_batch(20)
DetectionFactory.create_batch(20)
DetectionSessionFactory.create_batch(50)
DetectionFrameFactory.create_batch(20)
DetectedObjectFactory.create_batch(20)
AlgorithmFactory.create_batch(20)
WaterSamplerFactory.create_batch(20)
ErrorMessageFactory.create_batch(20)
FrontEndUserInputFactory.create_batch(20)
LoraTransmitterFactory.create_batch(20)
LoraTransmitterLocationFactory.create_batch(20)
LidarPointSessionFactory.create_batch(20)
LidarPointFactory.create_batch(20)
BuildMapImageFactory.create_batch(50)
BuildMapSessionFactory.create_batch(20)
ControlDeviceFactory.create_batch(20)
MissionLogFactory.create_batch(20)
return redirect('login')
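# For reference, a factory such as UserFactory is typically declared along
# these lines (a hypothetical sketch; the real declarations live in
# .factories and may differ):
#
#   class UserFactory(factory.django.DjangoModelFactory):
#       class Meta:
#           model = User
#
#       username = factory.Sequence(lambda n: 'user%d' % n)
#       email = factory.LazyAttribute(lambda o: '%s@example.com' % o.username)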
class OperationListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
'''
List all operations or create new one. The get and create methods are inherited,
using the generics.ListCreateAPIView.
Tutorial Reference: https://www.django-rest-framework.org/tutorial/3-class-based-views/
'''
queryset = Operation.objects.all()
serializer_class = OperationSerializer
'''
Ensure that authenticated requests get read-write access, and unauthenticated requests get read-only access
'''
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
def perform_create(self, serializer):
'''
Allows us to modify how the instance save is managed,
and handle any information that is implicit in the incoming request or requested URL.
'''
serializer.save(
operator=self.request.user) # Operations are associated with the user that created them
class DroneListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
serializer_class = DroneSerializer
def get_queryset(self):
operation_name = self.kwargs.get("operation_name")
return Drone.objects.filter(operation__operation_name=operation_name)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class DroneRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve, update (patch) or delete a drone instance
"""
queryset = Drone.objects.all()
serializer_class = DroneSerializer
lookup_field = 'drone_name'
def get_object(self):
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
obj = Drone.objects.get(drone_name=drone_name)
if obj is None:
raise Http404
return obj
def patch(self, request, *args, **kwargs):
'''
Partially update the attributes of a drone.
This is useful for example in case the drone is connected/disconnected from the platform, we update (patch)
the "is_drone_active" field to true/false. OR we can update its DroneDetection field
'''
operation_name = self.kwargs.get("operation_name")
operation_obj = Operation.objects.filter(
operator=request.user, active=True)
drone_name = self.kwargs.get("drone_name")
        qs = Drone.objects.filter(
            drone_name=drone_name, operation__operation_name=operation_name)
obj = get_object_or_404(qs)
serializer = DroneSerializer(
obj, data=json.loads(request.body), partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
class DetectionRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve, update (patch) or delete a detection drone instance
"""
queryset = Drone.objects.all()
serializer_class = DetectionDroneSerializer
lookup_field = 'drone_name'
def patch(self, request, *args, **kwargs):
'''
Partially update the attributes of a detection drone.
This is useful when we just want to change the detection status of the drone
'''
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
qs = Detection.objects.filter(
name=drone_name, operation__operation_name=operation_name)
obj = get_object_or_404(qs)
serializer = DetectionSerializer(
obj, data=json.loads(request.body), partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
class MissionListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = Mission.objects.all()
serializer_class = MissionSerializer
def mission_save_to_db(missionObj, dronePK, userPK, operationPK):
serializer = MissionSerializer(data=missionObj)
if serializer.is_valid():
createdMission = serializer.save()
Drone.objects.filter(pk=dronePK).update(mission=createdMission.pk)
logger.info('Mission with id {} is created successfully.'.format(
createdMission.pk))
MissionLoggerListCreateAPIView.mission_logger_save_to_db(
'START_MISSION', createdMission, userPK, operationPK, dronePK)
return True
else:
msg = 'Mission is not valid and is not created. Error: {}.'.format(
serializer.errors)
from .consumers import ErrorMsg
ErrorMsg.set_message_and_error(logger, Drone.objects.get(
pk=dronePK).operation.operation_name, msg)
return False
class MissionLoggerListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = MissionLog.objects.all()
serializer_class = MissionLoggerSerializer
def mission_logger_save_to_db(action, mission, userPK, operationPK, dronePK):
if Mission.objects.get(pk=mission.pk).mission_type == 'SEARCH_AND_RESCUE_MISSION':
algorithm = Algorithm.objects.filter(
algorithm_name='CALCULATE_SEARCH_AND_RESCUE_MISSION_PATHS_ALGORITHM', user=userPK, operation=operationPK).last()
algorithmPK = algorithm.pk
else:
algorithmPK = None
missionLoggerData = {
'action': action,
'mission': Mission.objects.get(pk=mission.pk).pk,
'user': userPK,
'operation': operationPK,
'drone': dronePK,
'algorithm': algorithmPK
}
serializerMissionLogger = MissionLoggerSerializer(
data=missionLoggerData)
if serializerMissionLogger.is_valid():
createdMissionLogger = serializerMissionLogger.save()
logger.info('Mission Logger is saved successfully.')
else:
msg = 'Mission Logger is not valid. Error: {}.'.format(
serializerMissionLogger.errors)
from .consumers import ErrorMsg
ErrorMsg.set_message_and_error(logger, Drone.objects.get(
pk=dronePK).operation.operation_name, msg)
class MissionRetrieveAPIView(LoginRequiredMixin, generic.ListView):
model = MissionLog
# fields = ('__all__')
template_name = 'aiders/missions.html'
queryset = MissionLog.objects.all()
success_url = reverse_lazy('home')
# def get(self, request, *args, **kwargs):
# context = self.get_context_data()
# return self.render_to_response(context)
#
# # self.object = self.get_object()
# # context = self.get_context_data(object=self.object)
# # return self.render_to_response(context)
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
context = super(MissionRetrieveAPIView,
self).get_context_data(**kwargs)
missions = list(MissionLog.objects.filter(
action="START_MISSION", operation=operation))
missionRemoveList = []
for mission in missions:
if not list(MissionLog.objects.filter(mission=mission.mission, action="FINISH_MISSION", operation=operation)):
missionRemoveList.append(mission)
for mission in missionRemoveList:
missions.remove(mission)
context['mission_results'] = missions
context['operation_name'] = self.kwargs.get('operation_name')
# Create any data and add it to the context
return context
# Replays mission data from the database to the front end
class ReplayMissionOnlineAPIView(LoginRequiredMixin, View):
def format_time(date, prev_date=0):
edit_date = date.astimezone(pytz.timezone(
settings.TIME_ZONE)).strftime("%Y-%m-%dT%H:%M:%S.%f")
if prev_date == edit_date[:-4]+'Z':
edit_date = edit_date[:-5]+str(int(edit_date[-6])+1)+'Z'
else:
edit_date = edit_date[:-4]+'Z'
return edit_date
def save_data(table, time_field_name, description, save_table):
prev_date = 0
time_field_name_edit = time_field_name
for data in table:
time_field_name = time_field_name_edit
if isinstance(time_field_name, list):
for time_field in time_field_name[1:]:
data[time_field] = ReplayMissionOnlineAPIView.format_time(
data[time_field])
time_field_name = time_field_name[0]
data[time_field_name] = ReplayMissionOnlineAPIView.format_time(
data[time_field_name], prev_date)
if data[time_field_name] in save_table:
if description in save_table[data[time_field_name]]:
number = 1
while True:
if description+' '+str(number) in save_table[data[time_field_name]]:
number = number+1
else:
save_table[data[time_field_name]
][description+' '+str(number)] = data
break
else:
save_table[data[time_field_name]][description] = data
else:
save_table[data[time_field_name]] = {}
save_table[data[time_field_name]][description] = data
prev_date = data[time_field_name]
return save_table
def edit_drone_data(drone_list):
for drone in drone_list:
drone['drone_name'] = Drone.objects.get(
pk=drone['drone']).drone_name
return drone_list
def get(self, request, *args, **kwargs):
replay_data = {}
time_series_data = {}
operation_name = self.kwargs.get('operation_name')
mission_id = self.kwargs.get('mission_id')
mission = Mission.objects.get(id=mission_id)
Mission_start = MissionLog.objects.filter(
mission=mission, action="START_MISSION")[0]
Mission_end = MissionLog.objects.filter(
mission=mission, action="FINISH_MISSION").last()
replay_data.update({"start_time": ReplayMissionOnlineAPIView.format_time(
Mission_start.executed_at), "end_time": ReplayMissionOnlineAPIView.format_time(Mission_end.executed_at)})
DronesInOperation = list(Telemetry.objects.filter(operation=Operation.objects.get(operation_name=operation_name), received_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values('drone').annotate(n=models.Count("pk")))
TelemetryInOperation = list(Telemetry.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), received_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
BuildMapSessionInOperation = list(BuildMapSession.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), start_time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
WeatherStationInOperation = list(WeatherStation.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), current_time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
ErrorMessageInOperation = list(ErrorMessage.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
DetectionSessionInOperation = list(DetectionSession.objects.filter(
operation=Operation.objects.get(operation_name=operation_name)).values())
AlgorithmInOperation = list(Algorithm.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), executed_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
FrontEndUserInputInOperation = list(FrontEndUserInput.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), time__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
Missions = list(Telemetry.objects.filter(operation=Operation.objects.get(operation_name=operation_name), received_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values('mission_log').annotate(n=models.Count("mission_log__mission")))
missionList = []
for current_mission_data in Missions:
            if current_mission_data['mission_log'] is not None:
mission = MissionLog.objects.get(
pk=current_mission_data['mission_log']).mission
mission_points = list(mission.mission_points.values())
for mission_point in mission_points:
for field in mission_point:
if isinstance(mission_point[field], point.Point):
mission_point[field] = [float(mission_point[field].coords[0]), float(
mission_point[field].coords[1])]
mission_object = Mission.objects.filter(
id=mission.pk).values().last()
mission_object['mission_points'] = mission_points
mission_object['executed_at'] = ReplayMissionOnlineAPIView.format_time(
mission_object['executed_at'])
mission_object['dronePK'] = MissionLog.objects.get(
pk=current_mission_data['mission_log']).drone.pk
missionList.append(mission_object)
replay_data.update({"mission_data": missionList})
replay_data.update(
{"drone_available": ReplayMissionOnlineAPIView.edit_drone_data(DronesInOperation)})
if TelemetryInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
TelemetryInOperation, 'received_at', 'telemetry', time_series_data)
if BuildMapSessionInOperation != []:
all_images = []
for session in BuildMapSessionInOperation:
BuildMapImageInOperation = list(
BuildMapImage.objects.filter(session=session['id']).values())
all_images = all_images+BuildMapImageInOperation
for image in all_images:
for field in image:
if isinstance(image[field], decimal.Decimal):
image[field] = float(image[field])
if isinstance(image[field], point.Point):
image[field] = [float(image[field].coords[0]), float(
image[field].coords[1])]
time_series_data = ReplayMissionOnlineAPIView.save_data(
all_images, 'time', 'build_map_image', time_series_data)
if DetectionSessionInOperation != []:
for session in DetectionSessionInOperation:
DetectionFrameInOperation = list(DetectionFrame.objects.filter(
detection_session=session['id'], saved_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
for frame in DetectionFrameInOperation:
frame['drone_id'] = Drone.objects.get(
id=session['drone_id']).drone_name
time_series_data = ReplayMissionOnlineAPIView.save_data(
DetectionFrameInOperation, 'saved_at', 'detection_frame', time_series_data)
DetectionObjectsInOperation = list(DetectedObject.objects.filter(
detection_session=session['id'], detected_at__range=(Mission_start.executed_at, Mission_end.executed_at)).values())
for objects in DetectionObjectsInOperation:
objects['drone_id'] = Drone.objects.get(
id=session['drone_id']).drone_name
time_series_data = ReplayMissionOnlineAPIView.save_data(
DetectionObjectsInOperation, 'detected_at', 'detected_object', time_series_data)
if WeatherStationInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
WeatherStationInOperation, 'current_time', 'weather_station', time_series_data)
if AlgorithmInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
AlgorithmInOperation, 'executed_at', 'algorithm', time_series_data)
if ErrorMessageInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
ErrorMessageInOperation, 'time', 'error', time_series_data)
if FrontEndUserInputInOperation != []:
time_series_data = ReplayMissionOnlineAPIView.save_data(
FrontEndUserInputInOperation, 'time', 'user_input', time_series_data)
for drone in DronesInOperation:
RawFrameInOperation = list(RawFrame.objects.filter(
live_stream_session__drone=Drone.objects.get(drone_name=drone['drone_name']), saved_at__range=(
Mission_start.executed_at, Mission_end.executed_at)).values())
time_series_data = ReplayMissionOnlineAPIView.save_data(
RawFrameInOperation, 'saved_at', 'video_frame', time_series_data)
replay_data.update({"time_series_data": time_series_data})
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
return render(request, "aiders/replay_mission.html", {
"replay_data": replay_data,
"operation_name": operation_name,
'operation': Operation.objects.get(operation_name=operation_name),
'mission_drone': Mission_start.drone.drone_name,
'use_online_map': use_online_map
})
class TelemetryListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = Telemetry.objects.all().order_by('-received_at')[:10]
serializer_class = TelemetrySerializer
class ControlDeviceDataAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
def control_device_save_data_to_db(jetsonObj):
try:
ControlDevice.objects.create(
drone=jetsonObj['drone'],
cpu_usage=jetsonObj['cpu_usage'],
cpu_core_usage=jetsonObj['cpu_core_usage'],
cpu_core_frequency=jetsonObj['cpu_core_frequency'],
cpu_temp=jetsonObj['cpu_temp'],
cpu_fan_RPM=jetsonObj['cpu_fan_RPM'],
gpu_usage=jetsonObj['gpu_usage'],
gpu_frequency=jetsonObj['gpu_frequency'],
gpu_temp=jetsonObj['gpu_temp'],
ram_usage=jetsonObj['ram_usage'],
swap_usage=jetsonObj['swap_usage'],
swap_cache=jetsonObj['swap_cache'],
emc_usage=jetsonObj['emc_usage'],
)
except Exception as e:
logger.error('Control Device {} Serializer data are not valid. Error: {}.'.format(
jetsonObj["drone"].drone_name, e))
class TelemetryRetrieveAPIView(LoginRequiredMixin, generics.RetrieveUpdateDestroyAPIView):
# queryset = Telemetry.objects.all().select_related('drone')
serializer_class = TelemetrySerializer
def get_object(self):
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
        '''
        The following queryset makes use of "lookups that span relationships".
        Reference: https://docs.djangoproject.com/en/1.11/topics/db/queries/#lookups-that-span-relationships
        '''
obj = Telemetry.objects.filter(drone__drone_name=drone_name).last()
if obj is None:
raise Http404
self.check_object_permissions(self.request, obj)
return obj
def save_telemetry_in_db(telemetryObj):
telemetryObj['water_sampler_in_water'] = water_collector.water_sampler_under_water
serializer = TelemetrySerializer(data=telemetryObj)
if serializer.is_valid():
serializer.save()
else:
            msg = 'Telemetry Serializer data are not valid. Error: {}.'.format(
                serializer.errors)
from .consumers import ErrorMsg
            ErrorMsg.set_message_and_error(logger, Drone.objects.get(
                pk=telemetryObj['drone']).operation.operation_name, msg)
def save_error_drone_data_in_db(errorObj):
serializer = ErrorMessageSerializer(data=errorObj)
if serializer.is_valid():
serializer.save()
else:
            logger.error('Error Message Serializer data are not valid. Error: {}.'.format(
                serializer.errors))
class MissionPointsListCreateAPIView(LoginRequiredMixin, generics.ListCreateAPIView):
queryset = MissionPoint.objects.all()
serializer_class = MissionPointSerializer
def list(self, request, *args, **kwargs):
'''
        Overriding the default method. We want a special use case here: we want to list
        the mission points for the particular mission that the specified drone is part of
Args:
request:
*args:
**kwargs:
Returns:
'''
operation_name = self.kwargs.get("operation_name")
drone_name = self.kwargs.get("drone_name")
# Get the mission points for the mission that this drone is currently participating
qs = Drone.objects.filter(drone_name=drone_name, operation=Operation.objects.get(
operation_name=operation_name))
drone = get_object_or_404(qs)
mission = drone.mission
if (not mission):
raise Http404(
"This drone is not in any active missions at the moment")
mission_points = mission.mission_points.all()
queryset = self.filter_queryset(mission_points)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class UserList(LoginRequiredMixin, generics.ListAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
def get(self, request, *args, **kwargs):
users = User.objects.exclude(username="AnonymousUser")
return render(request, 'aiders/users.html', {'users': users})
class DroneList(LoginRequiredMixin, generics.ListAPIView):
queryset = Drone.objects.all()
serializer_class = DroneSerializer
def get(self, request, *args, **kwargs):
drones = Drone.objects.all()
return render(request, 'aiders/drones.html', {'drones': drones})
def save_drone_to_db(droneObj):
serializer = DroneSerializer(data=droneObj)
if serializer.is_valid():
drone = serializer.save()
logger.info('Drone Serializer id {} is saved.'.format(drone.pk))
else:
logger.error(
'Drone Serializer data are not valid. Error: {}.'.format(serializer.errors))
class UserDetail(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
class AlgorithmRetrieveView(LoginRequiredMixin, View):
queryset = Algorithm.objects.all()
serializer_class = AlgorithmSerializer
def get(self, request, *args, **kwargs):
attribute = self.kwargs.get("attr")
'''
Retrieve the algorithm with the specified id
but only the "input" or "output" attribute
'''
if attribute != "input" and attribute != "output":
return Response(status=status.HTTP_400_BAD_REQUEST)
pk = self.kwargs.get("pk")
algorithm = get_object_or_404(Algorithm.objects.filter(pk=pk))
serializer = AlgorithmSerializer(algorithm)
# res = Response(serializer.data)
# attr = res.data.get(attribute)
# res.data = attr
        attr_json = serializer.data.get(attribute)
        attr_html_table = json2html.convert(json=attr_json)
        return render(request, 'aiders/algorithm_info.html', {'attr_name': attribute, 'attr_object_html_format': attr_html_table})
# return serializer.data.get(attribute)
# if (attribute == 'input'):
# serializer = AlgorithmSerializer(algorithm)
# res = Response(serializer.data)
# return res
# elif (attribute == 'output'):
# return Response(status=status.HTTP_404_NOT_FOUND)
# qs = Algorithm.objects.filter(pk=pk).only('output').values()
# obj = get_object_or_404(qs)
# self.check_object_permissions(self.request, obj)
# return obj
# Get first object from all objects on Algorithm
# obj = Algorithm.objects.all().first()
# self.check_object_permissions(self.request, obj)
# return obj
def save_algorithm_to_db(algorithmObj):
serializer = AlgorithmSerializer(data=algorithmObj)
if serializer.is_valid():
serializer.save()
logger.info('Algorithm Serializer is saved.')
else:
logger.error('Algorithm Serializer data are not valid. Error: {}.'.format(
serializer.errors))
class ManageOperationsView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
operations = Operation.objects.all()
users = User.objects.all()
return render(request, 'aiders/manage_operations.html', {'operations': operations, 'users': users, 'use_online_maps': False})
# class JoinOperationView(LoginRequiredMixin,View):
# def get(self, request, *args, **kwargs):
# operation_id = self.kwargs.get("operation_id")
# operation = Operation.objects.get(pk=operation_id)
# return render(request, 'aiders/join_operation.html', {'operation': operation})
class ManagePermissionsView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to read the permissions.")
users = User.objects.exclude(username="AnonymousUser")
for user in users:
user.permission_edit_permissions = user.has_perm(
'aiders.edit_permissions')
user.permission_create_operations = user.has_perm(
'aiders.create_operations')
user.save()
operation_groups = ''
all_groups = Group.objects.all()
for group in all_groups:
            if ' operation join' in str(group.name):
                operation_groups = operation_groups + \
                    (group.name).replace(' operation join', '')+','
return render(request, 'aiders/manage_permissions.html', {'users': users, 'all_groups': operation_groups})
def post(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to change the permissions.")
for user in User.objects.exclude(username="AnonymousUser"):
User.update_permissions(user.id, 'permission_edit_permissions', str(
user.id) in request.POST.getlist('permission_edit_permissions'))
User.update_permissions(user.id, 'permission_create_operations', str(
user.id) in request.POST.getlist('permission_create_operations'))
users = User.objects.exclude(username="AnonymousUser")
for user in users:
user.permission_edit_permissions = user.has_perm(
'aiders.edit_permissions')
user.permission_create_operations = user.has_perm(
'aiders.create_operations')
user.save()
operation_groups = ''
all_groups = Group.objects.all()
for group in all_groups:
            if ' operation join' in str(group.name):
                operation_groups = operation_groups + \
                    (group.name).replace(' operation join', '')+','
return render(request, 'aiders/manage_permissions.html', {'users': users, 'all_groups': operation_groups}, status=status.HTTP_202_ACCEPTED)
class ManageUserPermissionsView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
if not request.user.has_perm('aiders.edit_permissions'):
raise PermissionDenied(
"You do not have permission to change the permissions.")
user_name = self.kwargs.get("user_name")
group_list = request.POST.get('selected')
group_list = group_list.split(',')
for group in Group.objects.all():
            if ' operation join' in str(group.name):
                User.objects.filter(username=user_name)[0].groups.remove(group)
for group_name in group_list:
group_object = Group.objects.filter(
name=group_name+" operation join").last()
User.objects.filter(username=user_name)[0].groups.add(group_object)
return HttpResponse(status=status.HTTP_200_OK)
def index(request):
'''
Triggered when the main page of the web app is loaded on browser
:param request:
'''
context = {'auth_form': AuthenticationForm}
if request.user.is_authenticated:
for drone in Drone.objects.filter(water_sampler_available=True):
p = threading.Thread(
target=water_collector.check_sensor, args=(drone.drone_name,))
p.start()
userQuery = User.objects.filter(pk=request.user.id)
user = get_object_or_404(userQuery)
joined_op_obj = user.joined_operation
if (joined_op_obj):
if request.method == 'POST':
previous_page = resolve(
request.POST.get('next', '/')).func.view_class
'''
If we got here on the main page after a POST request, that means user posted some data from a form
'''
if (previous_page == AlgorithmListView):
'''
Check if we got here after user selected to show results for some algorithms (That is, if we got here from aiders/algorithms.html)
If this is the case, save the results to the request session and then redirect again to this page
This is because if we don't redirect, the "POST" request will persist.
Reference: https://stackoverflow.com/a/49178154/15290071
'''
algorithm_result_ids = request.POST.getlist(
'checkedAlgoResultIDs')
request.session['checkedAlgoResultIDs'] = algorithm_result_ids
return HttpResponseRedirect(reverse('home'))
if (previous_page == MissionRetrieveAPIView):
mission_ids = request.POST.getlist('checkedMissionIDs')
request.session['checkedMissionIDs'] = mission_ids
return HttpResponseRedirect(reverse('home'))
elif request.method == 'GET':
context = {'operation': joined_op_obj,
'net_ip': os.environ.get("NET_IP", "localhost")}
'''
Check if there are any results to show for the algorithms
'''
                user_wants_to_load_algorithm_results_on_map = request.session.get(
                    'checkedAlgoResultIDs') is not None
                user_wants_to_load_missions_on_map = request.session.get(
                    'checkedMissionIDs') is not None
if (user_wants_to_load_algorithm_results_on_map):
algorithm_result_ids = request.session.get(
'checkedAlgoResultIDs')
try:
qs = Algorithm.objects.filter(
pk__in=algorithm_result_ids)
algorithm_results = get_list_or_404(qs)
algorithm_results = core_serializers.serialize(
'json', algorithm_results, fields=('pk', 'algorithm_name', 'output'))
context['algorithm_results'] = algorithm_results
del request.session['checkedAlgoResultIDs']
except:
'''
Remove the algorithm results from context if the user doesn't select an algorithm
'''
context.pop("algorithm_results", None)
else:
'''
Remove the algorithm results from context if they exist.
user does not want to load any results on the map
e.g If the previous screen was the 'login' page, user just wants to log in, not to display any algorithm results
'''
context.pop("algorithm_results", None)
else:
context = {'join_operation_form': JoinOperationForm}
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
# context = {'auth_form': AuthenticationForm,'use_online_map':use_online_map}
context['use_online_map'] = use_online_map
return render(request, 'aiders/index.html', context)
return render(request, 'aiders/login.html', context)
class DroneModifyOperationView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
drone_name = self.kwargs.get("drone_name")
response = Operation.objects.filter(
drones_to_operate=Drone.objects.get(drone_name=drone_name).pk)
response = core_serializers.serialize('json', response)
drone_data = Drone.objects.get(drone_name=drone_name)
response = json.loads(response)
for data in response:
if str(data['fields']['operation_name']) == str(drone_data.operation):
data['fields'].update({'Selected': 'Selected'})
response = json.dumps(response)
return HttpResponse(response)
def post(self, request, *args, **kwargs):
operation_name = request.POST['operation_name']
drone_name = self.kwargs.get('drone_name')
drone = Drone.objects.get(drone_name=drone_name)
if operation_name == "None":
drone.operation = None
drone.save()
else:
try:
drone.operation = Operation.objects.get(
operation_name=operation_name)
drone.save()
except:
                return HttpResponseNotFound("Operation not found")
return HttpResponse(drone_name, status=status.HTTP_202_ACCEPTED)
class BuildMapAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
start_build_map = request.POST.get('start_build_map_boolean')
multispectral_build_map = request.POST.get(
'start_multispectral_build_map')
overlap = request.POST.get("overlap")
if start_build_map == 'true':
build_map_request_handler.buildMapPublisherSingleMessage(
drone_name, True, overlap)
logger.info(
'User sending build map request Start for drone {}.'.format(drone_name))
buildSessionActive = BuildMapSession.objects.filter(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name)).last()
droneActive = Drone.objects.get(
drone_name=drone_name).build_map_activated
            if buildSessionActive is None:
BuildMapSession.objects.create(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name), folder_path=Constants.BUILD_MAP_DIR_PREFIX + drone_name + "_")
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = True
drone.save()
return HttpResponse(status=status.HTTP_202_ACCEPTED)
else:
                if not buildSessionActive.is_active and not droneActive:
BuildMapSession.objects.create(user=User.objects.get(username=request.user.username), operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name), folder_path=Constants.BUILD_MAP_DIR_PREFIX + drone_name + "_")
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = True
drone.save()
return HttpResponse(status=status.HTTP_202_ACCEPTED)
elif start_build_map == 'false':
build_map_request_handler.buildMapPublisherSingleMessage(
drone_name, False, overlap)
logger.info(
'User sending build map request Stop for drone {}.'.format(drone_name))
drone = Drone.objects.get(drone_name=drone_name)
drone.build_map_activated = False
drone.save()
            BuildMapSession.objects.filter(operation=Operation.objects.get(operation_name=operation_name), drone=Drone.objects.get(
                drone_name=drone_name), is_active=True).update(end_time=datetime.now(tz=Constants.CYPRUS_TIMEZONE_OBJ), is_active=False)
return HttpResponse(status=status.HTTP_202_ACCEPTED)
logger.error(
'Encounter an error when user send a build map request for drone {}.'.format(drone_name))
return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
class LidarPointsAPIView(LoginRequiredMixin, generic.UpdateView):
def save_point_in_db(data, dji_name, lidar_session):
        if LidarPointSession.objects.get(id=lidar_session.id).is_active:
LidarPoint.objects.create(
points=data,
lat=None,
lon=None,
drone=Drone.objects.get(drone_name=dji_name),
lidar_point_session=lidar_session
)
class BuildMapGetLastImageAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
Session = BuildMapSession.objects.filter(operation=Operation.objects.get(operation_name=operation_name), drone=Drone.objects.get(
drone_name=drone_name)).last() # operation=Operation.objects.get(operation_name=operation_name),
try:
image = Session.images.all().last()
image = model_to_dict(image)
except:
logger.error(
'Encounter an error while searching for a Build Map image for drone {}.'.format(drone_name))
return HttpResponse('', status=status.HTTP_404_NOT_FOUND)
image['top_left'] = [float(image['top_left'].coords[0]), float(
image['top_left'].coords[1])]
image['top_right'] = [float(image['top_right'].coords[0]), float(
image['top_right'].coords[1])]
image['bottom_left'] = [float(image['bottom_left'].coords[0]), float(
image['bottom_left'].coords[1])]
image['bottom_right'] = [float(image['bottom_right'].coords[0]), float(
image['bottom_right'].coords[1])]
image['centre'] = [float(image['centre'].coords[0]), float(
image['centre'].coords[1])]
image['altitude'] = float(image['altitude'])
image['bearing'] = float(image['bearing'])
logger.info(
'Found Build Map Image Successfully for drone {}.'.format(drone_name))
return HttpResponse(json.dumps(image), status=status.HTTP_202_ACCEPTED)
class BuildMapGetLastAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
operation_name = self.kwargs.get('operation_name')
drone_name = request.POST.get('drone_id')
buildMapSession = BuildMapSession.objects.filter(operation=Operation.objects.get(
operation_name=operation_name), drone=Drone.objects.get(drone_name=drone_name)).last()
        if buildMapSession is None:
logger.error(
'Encounter an error while getting last image from Build Map Session for drone {}.'.format(drone_name))
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
dictionary = {}
dictionary['id'] = buildMapSession.pk
dictionary['user'] = buildMapSession.user.username
dictionary['drone_id'] = buildMapSession.drone.drone_name
dictionary['start_time'] = str(
buildMapSession.start_time.date())+" "+str(buildMapSession.start_time.time())
response = json.dumps(dictionary)
logger.info(
'Found Build Map Session Successfully for drone {}.'.format(drone_name))
return HttpResponse(response, status=status.HTTP_202_ACCEPTED)
@csrf_exempt
def BuildMapImageView(request):
if request.method == 'POST':
img_file = request.FILES.get('image_file')
img_name = request.POST.get('image_name')
drone_name = request.POST.get('drone_name')
drone_bearing = float(request.POST.get('bearing'))
drone_alt = float(request.POST.get('alt'))
drone_lat = float(request.POST.get('lat'))
drone_lon = float(request.POST.get('lon'))
extra_data = False
try:
d_roll = float(request.POST.get('d_roll'))
d_pitch = float(request.POST.get('d_pitch'))
d_yaw = float(request.POST.get('d_yaw'))
g_roll = float(request.POST.get('g_roll'))
g_pitch = float(request.POST.get('g_pitch'))
g_yaw = float(request.POST.get('g_yaw'))
extra_data = True
except:
extra_data = False
drone_instance = Drone.objects.get(drone_name=drone_name)
# if extra_data:
# # drone_bearing=drone_bearing+5
# drone_lat, drone_lon=img_georeference.high_accuracy_image_center(drone_lat, drone_lon, drone_alt, d_pitch, d_roll, drone_bearing)
destinations = img_georeference.calcPoints(
drone_lat, drone_lon, drone_bearing, drone_alt, img_name, drone_instance.model, drone_instance.camera_model)
try:
if drone_instance.is_connected_with_platform and drone_instance.build_map_activated:
Session = BuildMapSession.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last()
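                # parse the upload with PIL so an invalid image raises before anything is stored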
Image.open(img_file)
file_name = default_storage.save(os.path.join(
Session.folder_path, img_file.name), img_file)
if extra_data:
image = BuildMapImage.objects.create(
path=Session.folder_path+'/'+img_name,
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(drone_lon, drone_lat),
altitude=Decimal(drone_alt),
bearing=Decimal(drone_bearing),
d_roll=d_roll,
d_pitch=d_pitch,
d_yaw=d_yaw,
g_roll=g_roll,
g_pitch=g_pitch,
g_yaw=g_yaw,
session=Session,
)
else:
image = BuildMapImage.objects.create(
path=Session.folder_path+'/'+img_name,
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(drone_lon, drone_lat),
altitude=Decimal(drone_alt),
bearing=Decimal(drone_bearing),
d_roll=None,
d_pitch=None,
d_yaw=None,
g_roll=None,
g_pitch=None,
g_yaw=None,
session=Session,
)
logger.info(
'Saved Image Successfully for Build Map Session {}.'.format(Session.id))
return HttpResponse({'status:success'}, status=status.HTTP_200_OK)
except Exception as e:
print(e)
return HttpResponse({'status:failed'}, status=status.HTTP_400_BAD_REQUEST)
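# img_georeference.calcPoints above turns the drone's position, bearing and
# altitude into the four image-corner coordinates. A minimal sketch of the
# underlying step, assuming geopy (already used elsewhere in this module);
# _example_corner_projection is a hypothetical helper, not the project's
# actual footprint maths:
def _example_corner_projection(lat, lon, bearing_deg, ground_distance_m):
    # Project a point ground_distance_m metres from (lat, lon) along
    # bearing_deg degrees on the ellipsoid.
    from geopy.distance import geodesic
    destination = geodesic(kilometers=ground_distance_m / 1000).destination(
        (lat, lon), bearing_deg)
    return destination.latitude, destination.longitude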
class BuildMapLoadAPIView(LoginRequiredMixin, generic.UpdateView):
def get(self, request, *args, **kwargs):
operation = Operation.objects.get(
operation_name=self.kwargs['operation_name'])
list_of_operation = list(operation.buildmapsession_set.all())
response = []
for data in list_of_operation:
dictionary = {}
dictionary['id'] = data.pk
dictionary['user'] = data.user.username
dictionary['drone_id'] = data.drone.drone_name
dictionary['start_time'] = str(
data.start_time.date())+" "+str(data.start_time.time())
dictionary['end_time'] = str(
data.end_time.date())+" " + str(data.end_time.time())
            # Only include the session if it has at least one image
            if BuildMapImage.objects.filter(session=data).exists():
response.append(dictionary)
json_string = json.dumps(response)
return HttpResponse(json_string)
def post(self, request, *args, **kwargs):
try:
build_map_id = json.loads(
request.body.decode('utf-8'))['build_map_id']
        except (KeyError, ValueError):
            return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
print(build_map_id)
map_build = list(BuildMapImage.objects.filter(
session_id=build_map_id).values())
print(map_build)
for data in map_build:
data['time'] = str(data['time'])
data['top_left'] = [float(data['top_left'].coords[0]), float(
data['top_left'].coords[1])]
data['top_right'] = [float(data['top_right'].coords[0]), float(
data['top_right'].coords[1])]
data['bottom_left'] = [float(data['bottom_left'].coords[0]), float(
data['bottom_left'].coords[1])]
data['bottom_right'] = [float(data['bottom_right'].coords[0]), float(
data['bottom_right'].coords[1])]
data['centre'] = [float(data['centre'].coords[0]), float(
data['centre'].coords[1])]
data['altitude'] = float(data['altitude'])
data['bearing'] = float(data['bearing'])
json_string = json.dumps(map_build)
return HttpResponse(json_string, status=status.HTTP_201_CREATED)
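# For reference, after the Point-to-list flattening above each serialized
# image entry has roughly this shape (field values are illustrative only;
# coordinates are [lon, lat] pairs because Point stores longitude first):
#
#     {
#         "id": 1,
#         "path": "Build_Maps_.../16680725277336719360.jpeg",
#         "time": "2022-11-10 11:28:46",
#         "top_left": [33.041, 35.146], "top_right": [33.043, 35.146],
#         "bottom_left": [33.041, 35.144], "bottom_right": [33.043, 35.144],
#         "centre": [33.042, 35.145], "altitude": 50.0, "bearing": 90.0
#     }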
class FirePredictionCreateAPIView(LoginRequiredMixin, generic.UpdateView):
def post(self, request, *args, **kwargs):
for jsonPostData in request:
try:
PostData = json.loads(jsonPostData)
if PostData['user']:
operation = Operation.objects.get(
operation_name=self.kwargs['operation_name'])
operationPK = operation.pk
user = User.objects.get(username=PostData['user'])
userPK = user.pk
algorithmName = 'FIRE_PROPAGATION_ALGORITHM'
canBeLoadedOnMap = True
input = PostData
del input['user']
try:
output = utils.handleAlgorithmExecution(
operationPK, input, canBeLoadedOnMap, algorithmName, userPK)
except Exception as e:
print(e)
return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
response = '['+str(output)+']'
return HttpResponse(response, status=status.HTTP_201_CREATED)
            except Exception:
                pass
raise Http404
def login_view(request):
if request.method == 'GET':
redirect_to = request.GET.get('next')
if request.user.is_authenticated:
            if redirect_to is not None:
return HttpResponseRedirect(redirect_to)
return HttpResponseRedirect(reverse('manage_operations'))
return render(request, 'aiders/login.html', {'auth_form': AuthenticationForm, 'next': redirect_to})
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
redirect_to = request.POST['next']
user = authenticate(request, username=username, password=password)
if user is not None:
if user.is_active:
if request.META.get('HTTP_X_FORWARDED_FOR'):
ip = request.META.get('HTTP_X_FORWARDED_FOR')
else:
ip = request.META.get('REMOTE_ADDR')
from user_agents import parse
user_agent = parse(request.META.get('HTTP_USER_AGENT'))
'''
When user logs in, save a few data that concern their machine
'''
terminal = Terminal(ip_address=ip, user=user,
os=user_agent.os.family,
device=user_agent.device.family,
logged_in=True,
browser=user_agent.browser.family)
terminal.save()
if not UserPreferences.objects.filter(user=user).exists():
UserPreferences.objects.create(
use_online_map=True, user=user)
login(request, user,
backend='django.contrib.auth.backends.ModelBackend')
if redirect_to != "None":
return HttpResponseRedirect(redirect_to)
return redirect('manage_operations')
else:
messages.error(request, 'Wrong username or password!')
return render(request, 'aiders/login.html', {'auth_form': AuthenticationForm, 'next': redirect_to})
def logout_view(request):
logout(request)
# Redirect to a success page
return redirect('login')
class NewOperationForm(LoginRequiredMixin, SessionWizardView):
template_name = 'aiders/operation_new_wizard.html'
def get_form_initial(self, step):
if not self.request.user.has_perm('aiders.create_operations'):
raise PermissionDenied(
"You do not have permission to create the operation.")
def done(self, form_list, form_dict, **kwargs):
wizard_form = {k: v for form in form_list for k,
v in form.cleaned_data.items()}
operation_instance = Operation.objects.none()
wizard_form["operator"] = self.request.user
operation_instance = Operation.objects.create(
operation_name=wizard_form["operation_name"],
location=wizard_form["location"],
description=wizard_form["description"],
operator=wizard_form["operator"],
)
drone_allow_list = Drone.objects.none()
for drone_id in form_list[1].data.getlist('Drones in'):
drone_allow_list = drone_allow_list | Drone.objects.filter(
pk=drone_id)
if form_list[1].data.getlist('drone_operation') == ['True']:
print(Drone.objects.get(pk=drone_id).operation)
                if Drone.objects.get(pk=drone_id).operation is None or not Drone.objects.get(pk=drone_id).is_connected_with_platform:
Drone.objects.filter(pk=drone_id).update(
operation=operation_instance)
operation_instance.drones_to_operate.set(drone_allow_list)
group_join_operation = Group.objects.create(
name=operation_instance.operation_name+" operation join")
group_edit_operation = Group.objects.create(
name=operation_instance.operation_name+" operation edit")
assign_perm('join_operation', group_join_operation, operation_instance)
assign_perm('edit_operation', group_edit_operation, operation_instance)
for user_id in form_list[1].data.getlist('Users in'):
User.objects.filter(pk=user_id)[0].groups.add(group_join_operation)
wizard_form["operator"].groups.add(group_edit_operation)
logger.info('Operation with id {} is created successfully.'.format(
operation_instance.pk))
return redirect('manage_operations')
class EditOperationForm(LoginRequiredMixin, SessionWizardView):
template_name = 'aiders/operation_edit_wizard.html'
def get_form_initial(self, step):
operation_name = self.kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
if self.request.user.has_perm('edit_operation', operation):
if 'operation_name' in self.kwargs and step == '0':
operation_dict = model_to_dict(operation)
return operation_dict
else:
return self.initial_dict.get(step, {})
else:
raise PermissionDenied(
"You do not have permission to change the operation.")
def get_context_data(self, form, **kwargs):
context = super(EditOperationForm, self).get_context_data(
form=form, **kwargs)
if self.steps.current == '1':
initial = {
'users_in': [],
'users_out': [],
}
operation_name = self.kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
operation_drones_dict = model_to_dict(operation)
all_drones = list(Drone.objects.all())
for user in User.objects.all():
# Don't display the 'AnonymousUser' on the user list. We don't care about anonymous users
if not user.username == 'AnonymousUser':
if user.has_perm('join_operation', operation):
initial['users_in'].append(user)
else:
initial['users_out'].append(user)
context.update({'drones_allow': set(all_drones) & set(
operation_drones_dict['drones_to_operate'])})
context.update({'drones_all': set(all_drones) ^ set(
operation_drones_dict['drones_to_operate'])})
context.update({'users_allow': initial['users_in']})
context.update({'users_all': initial['users_out']})
context.update({'edit_form': True})
return context
def done(self, form_list, form_dict, **kwargs):
wizard_form = {k: v for form in form_list for k,
v in form.cleaned_data.items()}
drone_allow_list = Drone.objects.none()
operation_name = self.kwargs['operation_name']
operation_instance = Operation.objects.get(
operation_name=operation_name)
for drone_id in form_list[1].data.getlist('Drones in'):
drone_allow_list = drone_allow_list | Drone.objects.filter(
pk=drone_id)
if form_list[1].data.getlist('drone_operation') == ['True']:
print(Drone.objects.get(pk=drone_id).operation)
                if Drone.objects.get(pk=drone_id).operation is None or not Drone.objects.get(pk=drone_id).is_connected_with_platform:
Drone.objects.filter(pk=drone_id).update(
operation=operation_instance)
operation_instance.location = wizard_form['location']
operation_instance.description = wizard_form['description']
operation_instance.drones_to_operate.set(drone_allow_list)
operation_instance.save()
Group.objects.get(
name=operation_instance.operation_name+" operation join").delete()
group = Group.objects.create(
name=operation_instance.operation_name+" operation join")
assign_perm('join_operation', group, operation_instance)
for user_id in form_list[1].data.getlist('Users in'):
User.objects.filter(pk=user_id)[0].groups.add(group)
# Iterate over the drones that are NOT allowed on this operation.
# If these drones were until now joined on this operation, kick them out
notAllowedDrones = form_list[1].data.getlist('Drones out')
for dronePK in notAllowedDrones:
droneInstance = Drone.objects.get(pk=dronePK)
if droneInstance.operation == operation_instance:
Drone.objects.filter(
drone_name=droneInstance.drone_name).update(operation=None)
return redirect('manage_operations')
class ExecuteAlgorithmAPIView(LoginRequiredMixin, APIView):
def post(self, request, *args, **kwargs):
operation_name = kwargs['operation_name']
operation = Operation.objects.get(operation_name=operation_name)
userPK = request.user.pk
operationPK = operation.pk
algorithmDetails = request.data
algorithmName = algorithmDetails['algorithmName']
input = algorithmDetails['input']
canBeLoadedOnMap = algorithmDetails['canBeLoadedOnMap']
output = utils.handleAlgorithmExecution(
operationPK, input, canBeLoadedOnMap, algorithmName, userPK)
return Response(output)
class ExecuteMissionAPIView(LoginRequiredMixin, APIView):
def get(self, request, *args, **kwargs):
operation_name = kwargs['operation_name']
drone_name = kwargs['drone_name']
user = request.user
operation = Operation.objects.get(operation_name=operation_name)
drone = Drone.objects.get(drone_name=drone_name)
mission_log = MissionLog.objects.filter(
action='START_MISSION', user=user.pk, drone=drone, operation=operation).last()
return Response(mission_log.mission.mission_type)
def post(self, request, *args, **kwargs):
# print("Request of the Execute Mission:", request, "\nand kwargs:", kwargs)
operation_name = kwargs['operation_name']
drone_name = kwargs['drone_name']
actionDetails = request.data
user_name = request.user.username
operation = Operation.objects.get(operation_name=operation_name)
User = get_user_model()
action = actionDetails['action']
grid = actionDetails['grid']
captureAndStoreImages = actionDetails['captureAndStoreImages']
missionPath = actionDetails['mission_points']
dronePK = Drone.objects.get(drone_name=drone_name).pk
try:
missionType = actionDetails['mission_type']
        except KeyError:
            missionType = None
# if missionType == Mission.NORMAL_MISSION:
mission_request_handler.publishMissionToRos(
operation.pk, missionType, drone_name, grid, captureAndStoreImages, missionPath, action, request.user.pk, dronePK)
# elif missionType == Mission.SEARCH_AND_RESCUE_MISSION:
# utils.handleAlgorithmExecution(operation.pk, input, canBeLoadedOnMap, userPK, algorithmName)
# pass
return Response(status=status.HTTP_200_OK)
class AlgorithmListView(LoginRequiredMixin, generic.ListView):
model = Algorithm
# fields = ('__all__')
template_name = 'aiders/algorithms.html'
queryset = Algorithm.objects.all()
success_url = reverse_lazy('home')
# def get(self, request, *args, **kwargs):
# context = self.get_context_data()
# return self.render_to_response(context)
#
# # self.object = self.get_object()
# # context = self.get_context_data(object=self.object)
# # return self.render_to_response(context)
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
# User has to join the operation in order to view the operation's algorithms
User.objects.filter(pk=self.request.user.id).update(
joined_operation=operation)
context = super(AlgorithmListView, self).get_context_data(**kwargs)
context['algorithm_results'] = operation.algorithm_set.all()
context['operation_name'] = self.kwargs.get('operation_name')
# Create any data and add it to the context
return context
@ login_required
@ csrf_protect
def stop_operation_view(request, operation_name):
if request.method == 'GET':
opQuery = Operation.objects.filter(operation_name=operation_name)
if (opQuery.exists()):
operation = get_object_or_404(opQuery)
if (operation.active):
operation.active = False
operation.save()
return redirect('manage_operations')
@ login_required
@ csrf_protect
def leave_operation_view(request):
if request.method == 'GET':
get_user_model().objects.filter(pk=request.user.id).update(joined_operation=None)
return redirect('manage_operations')
# if (userQuery.exists()):
# get_object_or_404(userQuery).update(joined_operation=None)
# user.joined_operation = None
# user.save()
# return redirect('home')
@ login_required
@ csrf_protect
def join_operation_view(request, operation_name):
if not request.user.has_perm('join_operation', Operation.objects.filter(operation_name=operation_name)[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
if request.method == 'POST':
opQuery = Operation.objects.filter(operation_name=operation_name)
if (opQuery.exists()):
operation = get_object_or_404(opQuery)
if (operation.active):
User.objects.filter(pk=request.user.id).update(
joined_operation=operation)
# get_object_or_404(user_query)
return redirect('home')
else:
raise Http404('Operation Not Found')
else:
raise Http404('Operation Not Found')
return JsonResponse({'success': False})
@ csrf_protect
def register_request(request):
if request.method == 'POST':
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
return redirect('manage_operations')
else:
form = NewUserForm()
return render(request=request, template_name='aiders/register.html', context={"register_form": form})
class DetectionAPIOperations():
@ staticmethod
def create_detection_session_on_db(user, operation, drone):
return DetectionSession.objects.create(
user=user,
operation=operation,
drone=drone
)
@ staticmethod
def save_frame_to_db(frame_file, detection_session):
detFrame = DetectionFrame.objects.create(
frame=frame_file,
detection_session=detection_session,
)
return detFrame
@ staticmethod
def update_detection_status_on_db(drone, detection_status, detection_type_str):
qs = Detection.objects.filter(drone__drone_name=drone.drone_name).update(
detection_status=detection_status, detection_type_str=detection_type_str)
@ staticmethod
def update_detection_session_end_time(detection_session):
end_time = datetime.datetime.now(tz=Constants.CYPRUS_TIMEZONE_OBJ)
DetectionSession.objects.filter(pk=detection_session.id).update(
end_time=end_time, is_active=False)
@ staticmethod
def update_latest_frame(detection_session, latest_frame_url):
DetectionSession.objects.filter(pk=detection_session.id).\
update(latest_frame_url=latest_frame_url)
@ staticmethod
def save_detected_object_to_db(detection_session, detectedObj, frame):
DetectedObject.objects.create(
track_id=detectedObj.trk_id,
label=detectedObj.label,
lat=detectedObj.lat,
lon=detectedObj.lon,
detection_session=detection_session,
distance_from_drone=detectedObj.distFromDrone,
frame=frame
)
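# A minimal usage sketch for the helpers above; user, operation, drone and
# frame_file are placeholders for instances a real caller already holds, and
# the frame-URL line assumes `frame` is a Django file field:
#
#     session = DetectionAPIOperations.create_detection_session_on_db(
#         user, operation, drone)
#     det_frame = DetectionAPIOperations.save_frame_to_db(frame_file, session)
#     DetectionAPIOperations.update_latest_frame(session, det_frame.frame.url)
#     DetectionAPIOperations.update_detection_session_end_time(session)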
class LiveStreamAPIOperations(LoginRequiredMixin, generics.RetrieveAPIView):
# def get(self, request, *args, **kwargs):
# operation_name=self.kwargs.get('operation_name')
# drone_name = self.kwargs.get('drone_name')
@ staticmethod
def create_live_stream_session_on_db(drone):
return LiveStreamSession.objects.create(
drone=drone
)
@ staticmethod
def save_raw_frame_to_db(frame_file, drone_name, live_stream_session):
detFrame = RawFrame.objects.create(
frame=frame_file,
drone=Drone.objects.get(drone_name=drone_name),
live_stream_session=live_stream_session,
)
return detFrame
@ staticmethod
def update_latest_raw_frame(live_stream_session, latest_frame_url):
LiveStreamSession.objects.filter(pk=live_stream_session.id).\
update(latest_frame_url=latest_frame_url)
@ api_view(['GET'])
def objects_detected_on_last_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
            active_detection_session = DetectionSession.objects.get(
                is_active=True, operation__operation_name=operation_name, drone__drone_name=drone_name)
# Get the last frame object for the active detection session
latest_frame = DetectionFrame.objects.filter(
detection_session=active_detection_session).last()
# Get the detected objects that appear on the last frame
detected_objects = DetectedObject.objects.filter(
frame=latest_frame)
except DetectionSession.DoesNotExist:
return Response({'error': Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE})
        if not detected_objects.exists():
return Response({'error': "No objects detected on last frame"})
serializer = DetectedObjectSerializer(detected_objects, many=True)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def last_detection_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
active_detection_session = DetectionSession.objects.get(
is_active=True, drone__drone_name=drone_name)
except DetectionSession.DoesNotExist:
return Response({'latest_frame_url': Constants.NO_ACTIVE_DETECTION_SESSION_ERROR_MESSAGE})
serializer = DetectionSessionSerializer(active_detection_session)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def last_raw_frame_api_view(request, operation_name, drone_name):
if request.method == 'GET':
try:
active_detection_session = LiveStreamSession.objects.get(
is_active=True, drone__drone_name=drone_name)
except LiveStreamSession.DoesNotExist:
return Response({'latest_frame_url': Constants.NO_ACTIVE_LIVE_STREAM_SESSION_ERROR_MESSAGE})
serializer = LiveStreamSessionSerializer(active_detection_session)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def detection_types_api_view(request, operation_name):
if request.method == 'GET':
from logic.algorithms.object_detection.src.models.label import \
get_labels_all
return Response({'detection_types': list(get_labels_all())})
return Response(status=status.HTTP_400_BAD_REQUEST)
@ api_view(['GET'])
def live_stream_status_api_view(request, operation_name, drone_name):
if request.method == 'GET':
liveStreamSession = LiveStreamSession.objects.get(
drone__drone_name=drone_name)
if (liveStreamSession.is_active):
return Response({'is_live_stream_active': True})
else:
return Response({'is_live_stream_active': False})
return Response(status=status.HTTP_400_BAD_REQUEST)
class WeatherLiveAPIView(LoginRequiredMixin, APIView):
def post(self, request, *args, **kwargs):
ThreadRunningPub = False
ThreadRunningSub = False
threadName = []
for thread in threading.enumerate():
if thread.name == 'MainWeatherPublisher':
threadName.append(thread)
ThreadRunningPub = True
elif thread.name == 'MainWeatherSubscriber':
threadName.append(thread)
ThreadRunningSub = True
if request.data['state'] == 'true':
operation_name = self.kwargs.get('operation_name')
operation_name = operation_name.replace(' ', '~')
            if not ThreadRunningPub:
publisherThread = MyThread(name='MainWeatherPublisher', target=weather_station_ros_publisher.main, args=(
operation_name, 'MainWeatherPublisher'))
sys.argv = Constants.START_WEATHER_DATA_PUBLISHER_SCRIPT[1:]
publisherThread.start()
            if not ThreadRunningSub:
subscriberThread = MyThread(name='MainWeatherSubscriber', target=weather_station_ros_subscriber.main, args=(
operation_name, 'MainWeatherSubscriber'))
subscriberThread.start()
else:
for thread in threadName:
thread.stop()
return HttpResponse('Threads up', status=status.HTTP_200_OK)
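# `MyThread` above is assumed to expose a cooperative `stop()`; the standard
# library's threading.Thread has no such method. A common sketch of that
# pattern (illustrative only, not necessarily the project's implementation):
class _ExampleStoppableThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._stop_event = threading.Event()

    def stop(self):
        # Signal the target loop; the target must poll stopped() and return.
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()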
class WeatherStationAPIView(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = WeatherStation.objects.all()
serializer_class = WeatherStationSerializer
def addWeatherStationDataToDB(data, object_name):
    object_name = object_name.replace('~', ' ')
    try:
        operation = Operation.objects.get(operation_name=object_name)
        WeatherStation.objects.create(
            wind_speed=data.speed,
            wind_direction=data.direction,
            temperature=data.temperature,
            pressure=data.pressure,
            humidity=data.humidity,
            heading=data.heading,
            operation=operation,
            drone=None,
        )
    except Operation.DoesNotExist:
        try:
            drone = Drone.objects.get(drone_name=object_name)
            WeatherStation.objects.create(
                wind_speed=data.speed,
                wind_direction=data.direction,
                temperature=data.temperature,
                pressure=data.pressure,
                humidity=data.humidity,
                heading=data.heading,
                operation=None,
                drone=drone,
            )
        except Drone.DoesNotExist:
            pass
def system_monitoring_save_to_db(cpu_usage, cpu_core_usage, cpu_temp, gpu_usage, gpu_memory, gpu_temp, ram_usage, swap_memory_usage, temp, mb_new_sent, mb_new_received, mb_new_total, disk_read, disk_write, battery_percentage):
SystemMonitoring.objects.create(
cpu_usage=cpu_usage,
cpu_core_usage=cpu_core_usage,
cpu_temp=cpu_temp,
gpu_usage=gpu_usage,
gpu_memory=gpu_memory,
gpu_temp=gpu_temp,
ram_usage=ram_usage,
swap_memory_usage=swap_memory_usage,
temp=temp,
upload_speed=mb_new_sent,
download_speed=mb_new_received,
total_network=mb_new_total,
disk_read=disk_read,
disk_write=disk_write,
battery_percentage=battery_percentage
)
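# Illustrative only: how a collector loop might assemble the arguments above
# with psutil (psutil is an assumption here; GPU and temperature fields need
# other sources and are zeroed in this sketch):
#
#     import psutil
#     system_monitoring_save_to_db(
#         cpu_usage=psutil.cpu_percent(),
#         cpu_core_usage=psutil.cpu_percent(percpu=True),
#         cpu_temp=0, gpu_usage=0, gpu_memory=0, gpu_temp=0,
#         ram_usage=psutil.virtual_memory().percent,
#         swap_memory_usage=psutil.swap_memory().percent,
#         temp=0, mb_new_sent=0, mb_new_received=0, mb_new_total=0,
#         disk_read=0, disk_write=0,
#         battery_percentage=psutil.sensors_battery().percent,
#     )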
class buildMapSessionsAPIView(LoginRequiredMixin, generic.ListView):
model = BuildMapSession
template_name = 'aiders/buildMapSession.html'
queryset = BuildMapSession.objects.all()
def get_context_data(self, **kwargs):
# Call the base implementation first to get the context
operation = Operation.objects.get(
operation_name=self.kwargs.get('operation_name'))
if not self.request.user.has_perm('join_operation', Operation.objects.filter(operation_name=self.kwargs.get('operation_name'))[0]):
raise PermissionDenied(
"You do not have permission to join the operation.")
context = super(buildMapSessionsAPIView,
self).get_context_data(**kwargs)
context['MapSession_results'] = list(
operation.buildmapsession_set.all())
index = 0
urlList = []
list_non_zero_images = list(BuildMapImage.objects.filter().values(
'session').annotate(n=models.Count("pk")))
while index < len(context['MapSession_results']):
element = context['MapSession_results'][index]
save = False
for session_non_zero_images in list_non_zero_images:
if session_non_zero_images['session'] == context['MapSession_results'][index].id:
context['MapSession_results'][index].images = session_non_zero_images['n']
save = True
            if not save:
                # Removing shrinks the list, so only advance the index when
                # the element is kept
                context['MapSession_results'].remove(element)
            else:
                urlList.append(self.request.build_absolute_uri(reverse(
                    'build_map_session_share', args=[self.kwargs.get('operation_name'), element.id])))
                index += 1
context['operation_name'] = self.kwargs.get('operation_name')
context['urls'] = urlList
return context
class buildMapSessionsShareAPIView(LoginRequiredMixin, View):
    def get(self, request, *args, **kwargs):
        buildMapSessionObject = BuildMapSession.objects.get(
            pk=self.kwargs.get('pk'))
fileList = []
with open('buildMapSession.csv', 'w') as csvFile:
fileWriter = csv.writer(
csvFile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(
[f.name for f in BuildMapSession._meta.get_fields()])
dataList = []
for key in [f.name for f in BuildMapSession._meta.get_fields()]:
try:
dataList.append(getattr(buildMapSessionObject, key))
except:
dataList.append("")
fileWriter.writerow(dataList)
with open('buildMapImages.csv', 'w') as csvFile2:
fileWriter = csv.writer(
csvFile2, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(
[f.name for f in BuildMapImage._meta.get_fields()])
for data in BuildMapImage.objects.filter(session=self.kwargs.get('pk')):
dataList = []
for key in [f.name for f in BuildMapImage._meta.get_fields()]:
try:
if isinstance(getattr(data, key), point.Point):
dataList.append(str(getattr(data, key).coords[0])+' '+str(
getattr(data, key).coords[1]))
else:
dataList.append(getattr(data, key))
except:
dataList.append("")
fileWriter.writerow(dataList)
try:
if not os.path.exists(default_storage.path('')+'/temp/'):
os.makedirs(default_storage.path('')+'/temp/')
else:
shutil.rmtree(default_storage.path('')+'/temp/')
os.makedirs(default_storage.path('')+'/temp/')
shutil.move('buildMapSession.csv', default_storage.path(
'')+'/temp/buildMapSession.csv')
shutil.move('buildMapImages.csv', default_storage.path(
'')+'/temp/buildMapImages.csv')
os.mkdir(default_storage.path('')+'/temp/' +
BuildMapImage.objects.filter(session=self.kwargs.get('pk')).last().path.split('/')[0])
for data in BuildMapImage.objects.filter(session=self.kwargs.get('pk')):
shutil.copyfile(default_storage.path(data.path),
default_storage.path('')+'/temp/'+data.path)
        except Exception:
            pass
try:
zip_file = zipfile.ZipFile(default_storage.path(
'build_map_session_share.zip'), 'w')
for root, dirs, files in os.walk(default_storage.path('temp')):
for f in files:
zip_file.write(os.path.join(root, f), f)
zip_file.close()
zip_file = open(default_storage.path(
'build_map_session_share.zip'), 'rb')
return FileResponse(zip_file)
except Exception as e:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
class waterCollectionActivatedAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).water_sampler_available:
try:
water_collector.publish_message(drone_name, 1)
WaterSampler.objects.create(
drone=Drone.objects.get(drone_name=drone_name),
operation=Operation.objects.get(
operation_name=operation_name),
user=User.objects.get(pk=request.user.pk),
telemetry=Telemetry.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last(),
)
logger.info(
'Water sampler activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
except Exception as e:
logger.error(
                    'Water sampler encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Water sampler encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
class ballisticActivatedAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).ballistic_available:
try:
ballistic.publish_message(drone_name, 1)
Ballistic.objects.create(
drone=Drone.objects.get(drone_name=drone_name),
operation=Operation.objects.get(
operation_name=operation_name),
user=User.objects.get(pk=request.user.pk),
telemetry=Telemetry.objects.filter(
drone=Drone.objects.get(drone_name=drone_name)).last(),
)
logger.info(
'Ballistic activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
except Exception as e:
logger.error(
                    'Ballistic encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Ballistic encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
class rangeFinderAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
drone_name = request.POST.get('drone_id')
start_stop = request.POST.get('start_stop')
operation_name = kwargs.get('operation_name')
if Drone.objects.get(drone_name=drone_name).camera_model:
try:
range_detection.buildMapPublisherSingleMessage(
drone_name, start_stop)
logger.info(
'Range Finder activated for drone {}.'.format(drone_name))
return HttpResponse('Sending message to drone.', status=status.HTTP_200_OK)
except Exception as e:
logger.error(
                    'Range Finder encountered an error for drone {}. Error: {}'.format(drone_name, e))
                return HttpResponse('Range Finder encountered an error for drone {}.'.format(drone_name), status=status.HTTP_200_OK)
class frontEndUserInputAPIView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
element = request.POST.get('elementId')
value = None
if request.POST.get('active') == "true":
active = True
elif request.POST.get('active') == "false":
active = False
else:
active = True
value = request.POST.get('active')
operation_name = kwargs.get('operation_name')
try:
FrontEndUserInput.objects.create(
operation=Operation.objects.get(operation_name=operation_name),
element_name=element,
active=active,
value=value
)
            return HttpResponse('Action Saved Successfully.', status=status.HTTP_200_OK)
except Exception as e:
logger.error(e)
return HttpResponse("Action Not Saved Successful.", status=status.HTTP_200_OK)
class SystemMonitoringView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
if request.user.is_superuser:
return render(request, 'aiders/monitoring-platform.html', {})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
class ControlDevicesMonitoringView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
if request.user.is_superuser:
drones = Drone.objects.all()
available_drones = list(ControlDevice.objects.filter().values(
'drone').annotate(n=models.Count("pk")))
temp = []
for drones_temp in available_drones:
temp.append(Drone.objects.get(id=drones_temp['drone']))
available_drones = temp
return render(request, 'aiders/monitoring-control-devices.html', {'drones': drones, 'available_drones': available_drones})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
class ControlDeviceMonitoringView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
print(kwargs.get('control_device'))
if request.user.is_superuser:
drone_name = kwargs.get('control_device')
available_drones = list(ControlDevice.objects.filter().values(
'drone').annotate(n=models.Count("pk")))
temp = []
for drones_temp in available_drones:
temp.append(drones_temp['drone'])
available_drones = temp
            if Drone.objects.get(drone_name=drone_name).id not in available_drones:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
return render(request, 'aiders/monitoring-control-device.html', {'drone_name': drone_name})
return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
def test_my_high_accuracy(self, lat, long, altitude, pitch, roll, heading):
    import math
    from geopy.distance import geodesic
    distance_pitch = altitude * math.tan(pitch * math.pi / 180)  # offset along heading (lat)
    distance_roll = altitude * math.tan(roll * math.pi / 180)  # offset across heading (long)
    destination_pitch = geodesic(
        kilometers=distance_pitch / 1000).destination((0, 0), heading + 0)
    destination_roll = geodesic(
        kilometers=distance_roll / 1000).destination((0, 0), heading + 270)
    newLat = lat + destination_pitch.latitude + destination_roll.latitude
    newLong = long + destination_pitch.longitude + destination_roll.longitude
    return (newLat, newLong)
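# Worked example of the offsets above: at 50 m altitude a 10 degree pitch
# gives 50 * tan(10 deg) ~= 8.82 m of ground offset along the heading, and a
# 5 degree roll gives 50 * tan(5 deg) ~= 4.37 m at heading + 270; the
# geodesic destination call then converts each metre offset into a lat/lon
# delta.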
class LoraTransmitterLocationRetrieveAPIView(LoginRequiredMixin, generics.RetrieveAPIView):
queryset = LoraTransmitterLocation.objects.all()
serializer_class = LoraTransmitterLocationSerializer
lookup_field = 'tagName'
def get_object(self):
tag_name = self.kwargs.get("lora_device_name")
qs = LoraTransmitterLocation.objects.filter(
loraTransmitter__tagName=tag_name)
if not qs.exists():
raise Http404('Object not found')
return qs.last() # Return the most recent information about this lora device
class LoraTransmiterListAPIView(LoginRequiredMixin, generics.ListAPIView):
queryset = LoraTransmitter.objects.all()
serializer_class = LoraTransmitterSerializer
class Lidar3DMesh(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_process=True, is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
data_return = {}
data_return['id'] = mesh_id
data_return['file_path'] = 'triangle_mesh/'+str(mesh_id)+'.glb'
lat = LidarPoint.objects.filter(lidar_point_session=mesh_id).aggregate(
Avg('telemetry__lat'))
lon = LidarPoint.objects.filter(
lidar_point_session=mesh_id).aggregate(Avg('telemetry__lon'))
data_return['long'] = lon['telemetry__lon__avg']
data_return['lat'] = lat['telemetry__lat__avg']
data_return['height'] = 0
data_return['heading'] = LidarPoint.objects.filter(
lidar_point_session=mesh_id)[0].telemetry.heading
data_return = json.dumps(data_return)
return HttpResponse(data_return)
class Lidar3DPoints(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
list_lidar_points = list(LidarPoint.objects.filter(
lidar_point_session=mesh_id))
point_dict = self.point_db_to_json(list_lidar_points)
point_dict = {'data': point_dict}
lat = LidarPoint.objects.filter(lidar_point_session=mesh_id).aggregate(
Avg('telemetry__lat'))
lon = LidarPoint.objects.filter(
lidar_point_session=mesh_id).aggregate(Avg('telemetry__lon'))
point_dict['coordinates'] = [
lat['telemetry__lat__avg'], lon['telemetry__lon__avg']]
# print(point_dict['data']['0']['coordinates'])
# lidar_points.lidar_points_to_long_lat(0, 0, 0, 0, 0, 0)
point_dict = json.dumps(point_dict)
return HttpResponse(point_dict)
def point_db_to_json(self, points):
point_id = 0
point_dict = {}
list_colors = []
for point in points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
list_colors.append(int(data[3]))
list_colors.append(int(data[4]))
list_colors.append(int(data[5]))
color_max = max(list_colors)
color_min = min(list_colors)
for point in points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
data = [float(x) for x in data]
point_dict[str(point_id)] = {}
point_dict[str(point_id)]['coordinates'] = data[0:3]
point_dict[str(point_id)]['color'] = [
(int(data[3]) - color_min), (int(data[4]) - color_min), (int(data[5]) - color_min)]
point_id = point_id+1
return point_dict
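# The parsing above assumes each LidarPoint row stores its samples as a
# '|'-separated string of 'x,y,z,r,g,b' records, e.g. (illustrative values):
#
#     "1.2,0.4,2.0,120,130,90|1.3,0.5,2.1,118,128,92|"
#
# so splitting on '|' and then ',' yields one [x, y, z] triple and one RGB
# triple per record; the trailing empty field is skipped by the
# `if data != ['']` guard.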
class Lidar_process_cloud_points(LoginRequiredMixin, generics.ListAPIView):
def post(self, request, *args, **kwargs):
mesh_id = request.POST.get('mesh_id')
mesh_object = LidarPointSession.objects.get(id=mesh_id)
works = self.run_lidar_point_triangle(mesh_object)
        if works:
mesh_object_update = LidarPointSession.objects.filter(id=mesh_id)
mesh_object_update.update(is_process=True)
return HttpResponse(200, status=status.HTTP_200_OK)
        return HttpResponse(500, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def get(self, request, *args, **kwargs):
lidar_session_list = list(LidarPointSession.objects.filter(operation=Operation.objects.get(
operation_name=self.kwargs.get("operation_name")), is_process=False, is_active=False).values())
for session in lidar_session_list:
session['start_time'] = str(session['start_time'])
            if session['end_time'] is not None:
session['end_time'] = str(session['end_time'])
lidar_session_list = json.dumps(lidar_session_list)
return HttpResponse(lidar_session_list)
def run_lidar_point_triangle(self, lidar_object):
list_all_points = []
list_all_color_points = []
list_of_lidar_points = list(
LidarPoint.objects.filter(lidar_point_session=lidar_object))
list_colors = []
for point in list_of_lidar_points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
list_colors.append(int(data[3]))
list_colors.append(int(data[4]))
list_colors.append(int(data[5]))
color_max = max(list_colors)
color_min = min(list_colors)
for point in list_of_lidar_points:
data_point = point.points.split('|')
for loop_data in data_point:
data = loop_data.split(',')
if data != ['']:
data = [float(x) for x in data]
list_all_points.append(
[data[0], data[1], data[2]])
list_all_color_points.append([
(int(data[3]) - color_min) / (color_max-color_min), (int(data[4]) - color_min) / (color_max-color_min), (int(data[5]) - color_min) / (color_max-color_min)])
for loop_data in list_all_points:
loop_data = np.asarray(loop_data)
list_all_points = np.asarray(list_all_points)
for loop_data in list_all_color_points:
loop_data = np.asarray(loop_data)
list_all_color_points = np.asarray(list_all_color_points)
try:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(list_all_points)
pcd.colors = o3d.utility.Vector3dVector(list_all_color_points)
# o3d.visualization.draw_geometries([pcd], point_show_normal=True)
alpha = 0.1
tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(
pcd)
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
pcd, alpha, tetra_mesh, pt_map)
# mesh.vertex_colors = o3d.utility.Vector3dVector(
# list_all_color_points)
if not mesh.has_vertex_normals():
mesh.compute_vertex_normals()
if not mesh.has_triangle_normals():
mesh.compute_triangle_normals()
# o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)
if not os.path.exists(default_storage.path('triangle_mesh')):
os.makedirs(default_storage.path('triangle_mesh'))
if os.path.exists(default_storage.path('triangle_mesh/'+str(lidar_object.id)+'.glb')):
os.remove(default_storage.path(
'triangle_mesh/'+str(lidar_object.id)+'.glb'))
o3d.io.write_triangle_mesh(
default_storage.path('triangle_mesh/'+str(lidar_object.id)+'.glb'), mesh)
return True
except Exception as e:
print(e)
return False
class FlyingReportAPIView(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
operation_name = self.kwargs.get("operation_name")
AvailableDroneList = list(Drone.objects.filter(
operation__operation_name=operation_name).values())
listDrones = []
for drone in AvailableDroneList:
listDrones.append({"drone_name": drone['drone_name'],
"latitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lat,
"longitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lon})
return render(request, 'aiders/flying_report.html', {'list_of_drones': listDrones, 'available_drones': json.dumps(listDrones), 'operation_name': operation_name, 'form': FlyingReportForm()})
def post(self, request, *args, **kwargs):
user = request.user.username
if request.POST.get('form_selection') != 'custom':
drone = request.POST.get('form_selection')
else:
drone = 'Unknown'
operation_name = self.kwargs.get("operation_name")
form = FlyingReportForm(request.POST)
if form.is_valid():
latitude = request.POST.get('latitude')
longitude = request.POST.get('longitude')
altitude = request.POST.get('altitude')
radius = request.POST.get('radius')
buffer_altitude = request.POST.get('buffer_altitude')
buffer_radius = request.POST.get('buffer_radius')
start_date = request.POST.get('start_date_time')
end_date = request.POST.get('end_date_time')
start_date = datetime.datetime.strptime(
start_date, '%Y-%m-%dT%H:%M')
end_date = datetime.datetime.strptime(
end_date, '%Y-%m-%dT%H:%M')
path = 'daily_fly_notams/notams' + \
str(len(FlyingReport.objects.all()))+'.pdf'
flying_report.main(user, drone, operation_name, latitude, longitude, altitude, radius,
buffer_altitude, buffer_radius, start_date, end_date, path)
try:
drone = Drone.objects.get(drone_name=drone)
except Drone.DoesNotExist:
drone = None
FlyingReport.objects.create(user=request.user, drone=drone, operation=Operation.objects.get(operation_name=operation_name), latitude=latitude, longitude=longitude, altitude=altitude,
radius=radius, buffer_altitude=buffer_altitude, buffer_radius=buffer_radius, start_date_time=start_date, end_date_time=end_date, file_path=path)
response = open(default_storage.path(path), 'rb')
return FileResponse(response)
operation_name = self.kwargs.get("operation_name")
AvailableDroneList = list(Drone.objects.filter(
operation__operation_name=operation_name).values())
listDrones = []
for drone in AvailableDroneList:
listDrones.append({"drone_name": drone['drone_name'],
"latitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lat,
"longitude": Telemetry.objects.filter(drone__drone_name=drone['drone_name']).last().lon})
return render(request, 'aiders/flying_report.html', {'list_of_drones': listDrones, 'available_drones': json.dumps(listDrones), 'operation_name': operation_name, 'form': form})
class FlyingReportTableAPIView(LoginRequiredMixin, generics.ListAPIView):
def get(self, request, *args, **kwargs):
operation_name = self.kwargs.get("operation_name")
fly_reports = FlyingReport.objects.filter(
operation=Operation.objects.get(operation_name=operation_name))
return render(request, 'aiders/flying_reports.html', {'flying_reports': fly_reports, 'operation_name': operation_name})
class DroneMovementAPIView(LoginRequiredMixin, generics.ListAPIView):
def create_data_to_db(data, drone_name):
        # Avoid duplicates: only store the record if it is not already present
        if not DroneMovement.objects.filter(seq=data.seq, uid=data.uid, time_stamp=data.timestamp).exists():
DroneMovement.objects.create(
seq=data.seq,
uid=data.uid,
time_stamp=data.timestamp,
drone=Drone.objects.get(drone_name=drone_name),
flight_logic_state=data.flight_logic_state,
wind_speed=data.wind_speed,
wind_angle=data.wind_angle,
battery_voltage=data.battery_voltage,
battery_current=data.battery_current,
position_x=data.position_x,
position_y=data.position_y,
position_z=data.position_z,
altitude=data.altitude,
orientation_x=data.orientation_x,
orientation_y=data.orientation_y,
orientation_z=data.orientation_z,
orientation_w=data.orientation_w,
velocity_x=data.velocity_x,
velocity_y=data.velocity_y,
velocity_z=data.velocity_z,
angular_x=data.angular_x,
angular_y=data.angular_y,
angular_z=data.angular_z,
linear_acceleration_x=data.linear_acceleration_x,
linear_acceleration_y=data.linear_acceleration_y,
linear_acceleration_z=data.linear_acceleration_z,
payload=data.payload,
)
def settings_view(request):
if request.user.is_authenticated:
if request.method == 'GET':
use_online_map = UserPreferences.objects.get(
user=request.user).use_online_map
return render(request, 'aiders/settings.html', {'use_online_map': use_online_map})
elif request.method == 'POST':
selectedVal = request.POST.get('map_mode_dropdown')
            use_online_map = (selectedVal == Constants.ONLINE_MAP_MODE)
UserPreferences.objects.filter(user=request.user).update(
use_online_map=use_online_map)
return render(request, 'aiders/settings.html', {'use_online_map': use_online_map})
# Delete later
class TestingBuildMap(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
with open('aiders/buildmapimages_db.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
active = True
for row in spamreader:
my_list = " ".join(row).split(",")
# if my_list[1] == 'matrice300_5807/16650632878898530304.jpeg':
# active = True
# if my_list[1] == 'matrice300_5807/16650634753086040064.jpeg':
# active = False
# # print(active)
print(my_list[1])
print(my_list[1] ==
'matrice300_5807/16680725277336719360.jpeg')
if my_list[1] == 'Build_Maps_matrice300_5807_2022-11-10_11.28.46/16680725277336719360.jpeg':
if active:
newBearing = (float(my_list[8])+float(my_list[14]))/2
long_lat = my_list[6].split(" ")
long_lat[1] = float(long_lat[1])
long_lat[0] = float(long_lat[0])
# long_lat[1], long_lat[0]=self.test_my_high_accuracy(float(long_lat[1]),float(long_lat[0]), float(my_list[7]), float(my_list[10]), float(my_list[9]), newBearing)
print(long_lat[1], long_lat[0])
destinations = img_georeference.calcPoints(float(long_lat[1]), float(
long_lat[0]), newBearing, float(my_list[7]), my_list[1], 'none', 'Zenmuse_H20T')
# print(destinations)
# print(float(my_list[8])+newBearing)
# print(float(my_list[10]), float(my_list[9]))
try:
print(
Point(float(long_lat[0]), float(long_lat[1])))
image = BuildMapImage.objects.create(
path=my_list[1],
top_left=Point(
destinations[2].longitude, destinations[2].latitude),
top_right=Point(
destinations[0].longitude, destinations[0].latitude),
bottom_left=Point(
destinations[1].longitude, destinations[1].latitude),
bottom_right=Point(
destinations[3].longitude, destinations[3].latitude),
centre=Point(
float(long_lat[0]), float(long_lat[1])),
altitude=Decimal(my_list[7]),
bearing=Decimal(
(float(my_list[8])+float(my_list[14]))/2),
d_roll=None,
d_pitch=None,
d_yaw=None,
g_roll=None,
g_pitch=None,
g_yaw=None,
session_id=1
)
print('working')
# active=False
except Exception as e:
print(e)
return HttpResponse(status=status.HTTP_200_OK)
|
KIOS-Research/AIDERS
|
aidersplatform/django_api/aiders/views.py
|
views.py
|
py
| 110,716 |
python
|
en
|
code
| 4 |
github-code
|
6
|
13303759601
|
import requests, asyncio
from app import ACCESS_TOKEN, PYONET_API_URL, db
from app.tools.p3log import P3Log
class Poller:
def __init__(self):
self.p3log = P3Log("poller.log")
self.poll_task = None
self.devices = []
async def test_access_token(self):
try:
r = requests.get(f"{PYONET_API_URL}/auth/test/api_key", headers={"Authorization": f"{ACCESS_TOKEN}"})
r.raise_for_status()
self.p3log.log_success("Successfully authenticated with Pyonet-API")
return True
except requests.exceptions.ConnectionError as ce:
self.p3log.log_warning(f"Could not connect to Pyonet-API. Suggested: check if Pyonet-API is running and the PYONET_API_URL is correctly configured in the .env. Error: {str(ce)}")
return False
except Exception as e:
self.p3log.log_error(f"ACCESS_TOKEN is invalid. Suggested: generate a new access token from the Pyonet-Dashboard interface and add it to the .env file. {str(e)}")
return False
async def init_polling(self):
''' Retrieves devices from Pyonet-API and starts the polling loop
'''
try:
r = requests.get(f"{PYONET_API_URL}/poller/devices", headers={"Authorization": ACCESS_TOKEN})
r.raise_for_status()
self.devices = r.json()
self.p3log.log_success(f"Successfully retrieved {len(self.devices)} devices from Pyonet-API")
# Start polling loop
loop = asyncio.get_event_loop()
self.poll_task = loop.create_task(self.start_poll_loop())
except Exception as e:
self.p3log.log_error(f"Initialization failed. Error: {str(e)}")
return False
# Main Polling Loop
async def start_poll_loop(self):
''' Polls devices in a loop
'''
try:
            for _ in range(10):
await self.poll_devices()
await asyncio.sleep(1)
        except asyncio.CancelledError:
            self.p3log.log("Polling loop exited")
            return False
async def poll_devices(self):
for device in self.devices:
print("Polling device: ", device["name"])
|
treytose/Pyonet-Poller
|
pyonet-poller/app/libraries/libpoller.py
|
libpoller.py
|
py
| 2,336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72757283709
|
"""
Utilities I can't put anywhere else...
"""
import time
from math import copysign, gcd
from copy import copy
import sys
import numpy as np
import datetime
import functools
def true_if_answer_is_yes(prompt=""):
invalid = True
while invalid:
x = input(prompt)
x = x.lower()
if x[0] == "y":
return True
elif x[0] == "n":
return False
print("Need one of yes/no, Yes/No, y/n, Y/N")
def flatten_list(some_list):
flattened = [item for sublist in some_list for item in sublist]
return flattened
class not_required_flag(object):
def __repr__(self):
return "Not required"
NOT_REQUIRED = not_required_flag()
def group_dict_from_natural(dict_group):
"""
If we're passed a natural grouping dict (eg dict(bonds=["US10", "KR3", "DE10"], equity=["SP500"]))
Returns the dict optimised for algo eg dict(US10=["KR3", "DE10"], SP500=[], ..)
:param dict_group: dictionary of groupings
:type dict_group: dict
:returns: dict
>>> a=dict(bonds=["US10", "KR3", "DE10"], equity=["SP500"])
>>> group_dict_from_natural(a)['KR3']
['US10', 'DE10']
"""
if len(dict_group) == 0:
return dict()
all_names = sorted(
set(sum([dict_group[groupname] for groupname in dict_group.keys()], []))
)
def _return_without(name, group):
if name in group:
g2 = copy(group)
g2.remove(name)
return g2
else:
return None
def _return_group(name, dict_group):
ans = [
_return_without(name, dict_group[groupname])
for groupname in dict_group.keys()
]
ans = [x for x in ans if x is not None]
if len(ans) == 0:
return []
ans = ans[0]
return ans
gdict = dict([(name, _return_group(name, dict_group)) for name in all_names])
return gdict
def str2Bool(x):
if isinstance(x, bool):
return x
return x.lower() in ("t", "true")
def str_of_int(x):
"""
Returns the string of int of x, handling nan's or whatever
:param x: Name of python package
:type x: int or float
:returns: 1.0 or -1.0
>>> str_of_int(34)
'34'
>>> str_of_int(34.0)
'34'
>>> import numpy as np
>>> str_of_int(np.nan)
''
"""
if isinstance(x, int):
return str(x)
try:
return str(int(x))
except BaseException:
return ""
def sign(x):
"""
Return the sign of x (float or int)
:param x: Thing we want sign of
:type x: int, float
:returns: 1 or -1
>>> sign(3)
1.0
>>> sign(3.0)
1.0
>>> sign(-3)
-1.0
>>> sign(0)
1.0
"""
return copysign(1, x)
def value_or_npnan(x, return_value=None):
"""
If x is np.nan return return_value
else return x
:param x: np.nan or other
:return: x or return_value
>>> value_or_npnan(np.nan)
>>> value_or_npnan(np.nan, -1)
-1
>>> value_or_npnan("thing")
'thing'
>>> value_or_npnan(42)
42
"""
try:
if np.isnan(x):
return return_value
else:
pass
# Not a nan will return x
except BaseException:
# Not something that can be compared to a nan
pass
# Either wrong type, or not a nan
return x
def are_dicts_equal(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
if len(intersect_keys) != len(d1_keys):
return False
same = set(o for o in intersect_keys if d1[o] == d2[o])
if len(same) != len(d1_keys):
return False
return True
PROGRESS_EXP_FACTOR = 0.9
class progressBar(object):
"""
Example (not docstring as won't work)
import time
thing=progressBar(10000)
for i in range(10000):
# do something
time.sleep(0.001)
thing.iterate()
thing.finished()
"""
def __init__(
self,
range_to_iter,
suffix="Progress",
toolbar_width=80,
show_each_time=False,
show_timings=True,
):
self._start_time = time.time()
self.toolbar_width = toolbar_width
self.current_iter = 0
self.suffix = suffix
self.range_to_iter = range_to_iter
        self.range_per_block = range_to_iter / float(toolbar_width)  # np.float was removed in NumPy 1.24
self._how_many_blocks_displayed = -1 # will always display first time
self._show_each_time = show_each_time
self._show_timings = show_timings
self.display_bar()
def estimated_time_remaining(self):
total_iter = self.range_to_iter
iter_left = total_iter - self.current_iter
time_per_iter = self.current_estimate_of_times
if time_per_iter is None:
return 0
return iter_left * time_per_iter
def update_time_estimate(self):
## don't maintain a list per se, instead exponential
time_since_last_call = self.time_since_last_called()
current_estimate = self.current_estimate_of_times
if current_estimate is None:
## seed
current_estimate = time_since_last_call
else:
current_estimate = ((1 - PROGRESS_EXP_FACTOR) * time_since_last_call) + (
PROGRESS_EXP_FACTOR * current_estimate
)
self.current_estimate_of_times = current_estimate
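    # The estimate above is an exponential moving average: with
    # PROGRESS_EXP_FACTOR = 0.9 each new per-iteration time contributes 10%
    # and the running estimate 90%, e.g. a current estimate of 2.0s and a new
    # 1.0s sample give 0.1 * 1.0 + 0.9 * 2.0 = 1.9s.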
@property
def current_estimate_of_times(self) -> float:
current_estimate = getattr(self, "_current_estimate_of_times", None)
return current_estimate
@current_estimate_of_times.setter
def current_estimate_of_times(self, current_estimate: float):
self._current_estimate_of_times = current_estimate
def time_since_last_called(self) -> float:
time_of_last_call = self.get_and_set_time_of_last_call()
current_time = self.current_time
return current_time - time_of_last_call
def get_and_set_time_of_last_call(self):
time_of_last_iter = copy(getattr(self, "_time_of_last_call", self.start_time))
self._time_of_last_call = self.current_time
return time_of_last_iter
def elapsed_time(self):
return self.current_time - self.start_time
@property
def start_time(self):
return self._start_time
@property
def current_time(self):
return time.time()
def iterate(self):
self.current_iter += 1
self.update_time_estimate()
if self.number_of_blocks_changed() or self._show_each_time:
self.display_bar()
if self.current_iter == self.range_to_iter:
self.finished()
def how_many_blocks_had(self):
return int(self.current_iter / self.range_per_block)
def how_many_blocks_left(self):
return int((self.range_to_iter - self.current_iter) / self.range_per_block)
def number_of_blocks_changed(self):
original_blocks = self._how_many_blocks_displayed
new_blocks = self.how_many_blocks_had()
if new_blocks > original_blocks:
return True
else:
return False
def display_bar(self):
percents = round(100.0 * self.current_iter / float(self.range_to_iter), 1)
if self._show_timings:
time_remaining = self.estimated_time_remaining()
time_elapsed = self.elapsed_time()
total_est_time = time_elapsed + time_remaining
time_str = "(%.1f/%.1f/%.1f secs left/elapsed/total)" % (
time_remaining,
time_elapsed,
total_est_time,
)
else:
time_str = ""
bar = "=" * self.how_many_blocks_had() + "-" * self.how_many_blocks_left()
progress_string = "\0\r [%s] %s%s %s %s\n" % (
bar,
percents,
"%",
self.suffix,
time_str,
)
sys.stdout.write(progress_string)
sys.stdout.flush()
self._how_many_blocks_displayed = self.how_many_blocks_had()
def finished(self):
self.display_bar()
sys.stdout.write("\n")
class quickTimer(object):
def __init__(self, seconds=60):
self._started = datetime.datetime.now()
self._time_limit = seconds
@property
def unfinished(self):
return not self.finished
@property
def finished(self):
time_now = datetime.datetime.now()
elapsed = time_now - self._started
if elapsed.seconds > self._time_limit:
return True
else:
return False
# avoids encoding problems with mongo
_none = ""
def none_to_object(x, object):
if x is _none:
return object
else:
return x
def object_to_none(x, object, y=_none):
if x is object:
return y
else:
return x
def get_unique_list(somelist):
uniquelist = []
for letter in somelist:
if letter not in uniquelist:
uniquelist.append(letter)
return uniquelist
MISSING_STR = -1
def transfer_object_attributes(named_tuple_object, original_object):
kwargs = dict(
[
(field_name, getattr(original_object, field_name))
for field_name in named_tuple_object._fields
]
)
new_object = named_tuple_object(**kwargs)
return new_object
def highest_common_factor_for_list(list_of_ints: list) -> int:
"""
:param list_of_ints:
:return: int
>>> highest_common_factor_for_list([2,3,4])
1
>>> highest_common_factor_for_list([2,6,4])
2
"""
return functools.reduce(gcd, list_of_ints)
def divide_list_of_ints_by_highest_common_factor(list_of_ints: list) -> list:
"""
:param list_of_ints:
:return: list
>>> divide_list_of_ints_by_highest_common_factor([1,2])
[1, 2]
>>> divide_list_of_ints_by_highest_common_factor([2,4])
[1, 2]
>>> divide_list_of_ints_by_highest_common_factor([1,2,3])
[1, 2, 3]
>>> divide_list_of_ints_by_highest_common_factor([1])
[1]
"""
gcd_value = highest_common_factor_for_list(list_of_ints)
new_list = [int(float(x) / gcd_value) for x in list_of_ints]
return new_list
def list_of_ints_with_highest_common_factor_positive_first(list_of_ints: list) -> list:
"""
Used to identify the unique version of a spread or fly contract
:param list_of_ints:
:return: list
>>> list_of_ints_with_highest_common_factor_positive_first([1])
[1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1])
[1]
>>> list_of_ints_with_highest_common_factor_positive_first([1,-1])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1,1])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1,2])
[1, -2]
>>> list_of_ints_with_highest_common_factor_positive_first([-2,2])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([2,-2])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([2,-4,2])
[1, -2, 1]
>>> list_of_ints_with_highest_common_factor_positive_first([-2,4,-2])
[1, -2, 1]
"""
new_list = divide_list_of_ints_by_highest_common_factor(list_of_ints)
multiply_sign = sign(new_list[0])
new_list = [int(x * multiply_sign) for x in new_list]
return new_list
def np_convert(val):
"""
Converts the passed numpy value to a native python type
>>> val = np.int64(1)
>>> type(val)
<class 'numpy.int64'>
>>> type(np_convert(val))
<class 'int'>
:param val:
:return: val as native type
"""
return val.item() if isinstance(val, np.generic) else val
if __name__ == "__main__":
import doctest
doctest.testmod()
|
ahalsall/pysystrade
|
syscore/genutils.py
|
genutils.py
|
py
| 11,815 |
python
|
en
|
code
| 4 |
github-code
|
6
|
32058100772
|
T = int(input())
for _ in range(T):
    quiz = input()
    cnt = 0
    ans = 0
    for result in quiz:
        if result == 'O':
            cnt += 1
            ans += cnt
        elif result == 'X':
            cnt = 0
    print(ans)
|
doll2gom/Algorithm
|
백준/Bronze/8958. OX퀴즈/OX퀴즈.py
|
OX퀴즈.py
|
py
| 255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32467203933
|
cnt_double_quote = 0
def solve(s):
global cnt_double_quote
res = ''
for c in s:
if c == '"':
cnt_double_quote += 1
if cnt_double_quote % 2 == 1: # First quote
res += '``'
else: # Second quote
res += "''"
else:
res += c
return res
def test_simple():
assert solve('"To be or not to be," quoth the bard, "that is the question."') \
== "``To be or not to be,'' quoth the bard, ``that is the question.''"
assert solve('The programming contestant replied: "I must disagree.') \
== "The programming contestant replied: ``I must disagree."
if __name__ == '__main__':
while True:
try:
s = input()
print(solve(s))
except EOFError:
break
|
jasonhuh/UVa-Solutions
|
272 TEX Quotes/272_TEXT_Quotes.py
|
272_TEXT_Quotes.py
|
py
| 834 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24929832115
|
#!/usr/bin/python
from nrf24 import NRF24
import time
from struct import *
class Spinner:
def __init__(self):
# Set up the radio
pipes = ["1Node", "2Node"]
self.radio = NRF24()
self.radio.begin(0,0,25,24) #Set CE and IRQ pins
self.radio.setRetries(15,15)
self.radio.setPayloadSize(32)
self.radio.setChannel(0x4c)
self.radio.setDataRate(NRF24.BR_1MBPS)
self.radio.setPALevel(NRF24.PA_LOW)
self.radio.setAutoAck(1)
self.radio.openWritingPipe(pipes[0])
self.radio.openReadingPipe(1, pipes[1])
self.radio.printDetails()
def getLoad(self):
# Get the average load from the load cell
        self.radio.stopListening()
data = pack('<cf', b'L', 0.0)
if not self.radio.write(data):
print("Sending failed")
return False
else:
print("Sending OK")
        self.radio.startListening()
while not self.radio.available([0], False):
pass
print("Message received")
recv_buffer = []
self.radio.read(recv_buffer, 32)
data = list(unpack('<llllllll', bytes(recv_buffer)))
return data
def setSetpoint(self, setpoint):
# Set the setpoint of the stepper (mm)
        self.radio.stopListening()
data = pack('<cf', b's', setpoint)
if not self.radio.write(data):
return False
return True
def sendInstruction(self, instruction, value, value_type):
# Send any instruction
# value types:
# f float
# l long
# c character
        self.radio.stopListening()
data = pack('<c' + value_type, instruction.encode(), value)
if not self.radio.write(data):
print("Sending failed")
return False
else:
print("Sending OK")
return True
def commandLineInterface(self):
# Send instructions from command line
while True:
c = input("Instruction character: ")
v = input("Instruction value: ")
if c in ["s", "v", "a", "t", "f", "1", "0", "p", "c"]:
t = 'f'
v = float(v)
elif c in ["u"]:
t = 'l'
v = long(v)
elif c in ["m"]:
t = 'i'
v = int(v)
elif c in ["i", "r", "L", "C"]:
t = '?'
v = bool(v)
else:
print("Invalid instruction: unknown type:", c)
continue
result = self.sendInstruction(c, v, t)
print(result)
if __name__ == "__main__":
s = Spinner()
s.commandLineInterface()
|
norn93/honey-spinner-server
|
spinner.py
|
spinner.py
|
py
| 2,751 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7725611508
|
import itertools
def det_4x4(matrix):
if len(matrix) != 4 or len(matrix[0]) != 4:
        raise ValueError("The matrix is not 4x4")
indices = [0, 1, 2, 3]
permuta = itertools.permutations(indices)
det = 0
for perm in permuta:
sinal = 1
for i in range(4):
for j in range(i + 1, 4):
if perm[i] > perm[j]:
sinal *= -1
produto = 1
for i in range(4):
produto *= matrix[i][perm[i]]
det += sinal * produto
return det
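# Minimal sanity check (an added illustration, not part of the original script):
# in the Leibniz sum over all 24 permutations, only the identity permutation
# contributes for the 4x4 identity matrix, so the determinant is 1.
assert det_4x4([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) == 1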
# ask the user for the elements of the 4x4 matrix
matrix = []
for i in range(4):
    row = input(f"Enter the elements of row {i+1} separated by spaces: ")
    matrix.append([int(x) for x in row.split()])
# compute the determinant using the Leibniz formula
det = det_4x4(matrix)
# print the result
print(f"The determinant of the matrix is {det}.")
|
AlexApLima/CalculoDeterminantesLeibniz
|
CalFormula.py
|
CalFormula.py
|
py
| 903 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
27560775803
|
data = open('input/day20.txt').read().splitlines()
data = [int(d) for d in data]
id_of_0 = data.index(0)
# %% Build a circular linked list which can move its node to left or right
class Node:
def __init__(self, data, id):
self.data = data
self.next = None
self.prev = None
self.id = id
def __repr__(self):
return f'Node({self.data})'
def __str__(self):
return f'Node({self.data})'
class CircularLinkedList:
def __init__(self):
self.head = None
        self.size = 0
def add_node(self, data_, id_):
new_node = Node(data_, id_)
if self.head is None:
self.head = new_node
new_node.next = self.head
new_node.prev = self.head
self.size = 1
else:
current = self.head
while current.next != self.head:
current = current.next
current.next = new_node
new_node.prev = current
new_node.next = self.head
self.head.prev = new_node
self.size += 1
def move_node_right(self, node, steps):
"""
:param node:
:param steps:
"""
node.prev.next = node.next
node.next.prev = node.prev
# move the
for i in range(steps):
node.next = node.next.next
node.prev = node.next.prev
node.next.prev = node
node.prev.next = node
def move_node_left(self, node, steps):
node.next.prev = node.prev
node.prev.next = node.next
for i in range(steps):
node.prev = node.prev.prev
node.next = node.prev.next
node.prev.next = node
node.next.prev = node
def find_node_with_id(self, id_):
current = self.head
for i in range(self.size):
if current.id == id_:
return current
current = current.next
return None
    def __repr__(self):
        current = self.head
        items = []
        for _ in range(self.size):
            items.append(current.data)
            current = current.next
        return str(items)
def find_node_after_node_times(self, node, times):
current = node
for i in range(times):
current = current.next
return current
#%%
# Part 1
# Build a circular linked list
c = CircularLinkedList()
for i, d in enumerate(data):
c.add_node(d, i)
#%%
# scratch cell from development; running it would shift node 0 and corrupt part 1
# c.move_node_left(c.find_node_with_id(id_of_0), 2)
#%%
for index in range(c.size):
node = c.find_node_with_id(index)
if node.data == 0:
pass
elif node.data > 0:
c.move_node_right(node, node.data)
elif node.data < 0:
c.move_node_left(node, abs(node.data))
#%%
result_p_1 = c.find_node_after_node_times(c.find_node_with_id(id_of_0),1000).data +\
c.find_node_after_node_times(c.find_node_with_id(id_of_0),2000).data +\
c.find_node_after_node_times(c.find_node_with_id(id_of_0),3000).data
#%% part2
from copy import deepcopy
data_2 = deepcopy(data)
key = 811589153
#%%
c2 = CircularLinkedList()
for i, d in enumerate(data_2):
c2.add_node(d * key, i)
cycle = c2.size - 1
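# a node moving through a circular list of n nodes returns to its slot after
# n - 1 steps (it is unlinked while it moves), hence the modulo by `cycle` below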
#%%
for i in range(10):
for index in range(c2.size):
node = c2.find_node_with_id(index)
if node.data == 0:
pass
elif node.data > 0:
c2.move_node_right(node, node.data % cycle)
elif node.data < 0:
c2.move_node_left(node, abs(node.data) % cycle)
#%%
c2.find_node_after_node_times(c2.find_node_with_id(id_of_0),1000 % cycle ).data +\
c2.find_node_after_node_times(c2.find_node_with_id(id_of_0),2000 % cycle).data +\
c2.find_node_after_node_times(c2.find_node_with_id(id_of_0),3000 % cycle).data
#%%
|
nhannht/aoc2022
|
day20.py
|
day20.py
|
py
| 3,778 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70740769147
|
from __future__ import print_function
import torch
def diff_mse(x, y):
x_vec = x.view(1, -1).squeeze()
y_vec = y.view(1, -1).squeeze()
return torch.mean(torch.pow((x_vec - y_vec), 2)).item()
def ax_plus_b_vector(x, weight, bias):
return weight.mm(x).add(bias)
def ax_plus_b_scalar(x, weight, bias, h, w, number_of_channels, kernel_size):
result = 0
for c_in in range(number_of_channels):
for i in range(kernel_size):
for j in range(kernel_size):
result += x[c_in, h + i, w + j] * weight[c_in, i, j]
return result + bias
def convolved_image_size(size, kernel_size, padding, stride):
return ((size - kernel_size + 2 * padding) // stride) + 1
def im2col(img, kernel_size, device, stride=1, padding=0):
N_batch, C_in, img_size, _ = img.shape
out_size = convolved_image_size(img_size, kernel_size, padding, stride)
col = torch.zeros((kernel_size, kernel_size, N_batch, C_in, out_size, out_size))
margin = stride * out_size
for x in range(kernel_size):
for y in range(kernel_size):
col[x, y] = img[:, :, x:x + margin:stride, y:y + margin:stride]
return col.view(kernel_size*kernel_size, -1).to(device)
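# Illustrative usage (a sketch, assuming a CPU device):
# img = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
# cols = im2col(img, kernel_size=3, device=torch.device("cpu"))
# cols.shape == (9, 4)  # kernel_size**2 rows; N*C*out_size**2 columns, out_size = (4-3)//1 + 1 = 2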
|
IvanProdaiko94/UCU-deep-learning-homework
|
layers/utilities.py
|
utilities.py
|
py
| 1,222 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37841455471
|
import logging
logger = logging.getLogger()
logger.setLevel(level="DEBUG")
# create a file handler: ERROR and above also go to log.txt
file_handle = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handle.setLevel(level="ERROR")
logger.addHandler(file_handle)
fmt = "%(name)s--->%(message)s--->%(asctime)s"
logging.basicConfig(level="DEBUG", format=fmt)
logging.debug("This is debug message")
logging.info("This is info message")
logging.warning("This is warning message")
logging.error("This is error")
logging.critical("This is critical message")
|
amespaces/pythonProject
|
common/创建文件处理器.py
|
创建文件处理器.py
|
py
| 552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22906645455
|
import xlrd
class XLDateInfo(object):
def __init__(self, path=''):
self.xl = xlrd.open_workbook(path)
self.sheet = None
def get_sheet_info_by_name(self, name):
self.sheet = self.xl.sheet_by_name(name)
return self.get_sheet_info()
def get_sheet_info(self):
infolist = []
for row in range(0, self.sheet.nrows):
info = self.sheet.row_values(row)
infolist.append(info)
return infolist
if __name__ == "__main__":
data_info = XLDateInfo(r'..\test_data\get_params_headers_data.xlsx')
all_data = data_info.get_sheet_info_by_name('TestData')
print(all_data)
|
weijianhui011/uploadfile
|
public/read_excel.py
|
read_excel.py
|
py
| 659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35007978934
|
from src.main.python.Solution import Solution
# Given a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
# 5
# / \
# 4 8
# / / \
# 11 13 4
#          / \    / \
#         7   2  5   1
# return
# [
# [5,4,11,2],
# [5,8,4,5]
# ]
class Q113(Solution):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
def path_sum(root, sum, path, ans):
if not root:
return
remainder = sum - root.val
path.append(root.val)
if not root.left and not root.right:
if remainder == 0:
ans.append(path[:])
else:
path_sum(root.left, remainder, path, ans)
path_sum(root.right, remainder, path, ans)
path.pop()
ans = []
if root:
path_sum(root, sum, [], ans)
return ans
|
renkeji/leetcode
|
python/src/main/python/Q113.py
|
Q113.py
|
py
| 1,099 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21971690182
|
import json
from collections import OrderedDict
from operator import itemgetter
TEAMS = None
MATCHES = {}
def get_other(t, title):
t1, t2 = title.split("-")
if t in t1:
return get_team(t2)
return get_team(t1)
def get_team(t):
for team in TEAMS:
if team in t:
return team
def main():
global TEAMS, MATCHES
with open("standings.json", "r") as stand:
t_dict = json.loads(stand.read())
TEAMS = OrderedDict((k, t_dict[k]) for k in sorted(t_dict, key=t_dict.get))
with open("splig.json", "r") as sp:
cal = OrderedDict(json.loads(sp.read()))["calendars"][0]["events"]
for t in TEAMS:
MATCHES[t] = []
for i in cal:
if t in i["summary"]:
MATCHES[t].append(TEAMS[get_other(t, i["summary"])])
print(t)
print("11 hafta:" , MATCHES[t][:11], sum(MATCHES[t][:11]))
print("Kalan:", MATCHES[t][11:17], sum(MATCHES[t][11:17]))
print()
if __name__ == "__main__":
main()
|
mfkaptan/fixture-visualizer
|
lig.py
|
lig.py
|
py
| 1,026 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38364864905
|
#!/usr/bin/env python
import eink
import ImageFont
import ImageDraw
import sys
def parse(text):
s = ['', '']
i = 0
for c in text:
if c == '*':
i = 1 - i
else:
s[i] += c
if c.isspace():
s[1-i] += c
else:
s[1-i] += ' '
return s[0], s[1]
def draw_text(text, size=12):
ink = eink.Eink()
font = ImageFont.truetype('DejaVuSansMono.ttf', size)
# generate images
img = ink.new()
d = ImageDraw.ImageDraw(img)
text_black, text_red = parse(text)
width, height = d.multiline_textsize(text_black, font)
    pos = (img.width - width) // 2, (img.height - height) // 2
d.multiline_text(pos, text_black, eink.BLACK, font)
d.multiline_text(pos, text_red, eink.RED, font)
# display images
ink.display(img)
def main():
draw_text(sys.argv[2], int(sys.argv[1]))
if __name__ == '__main__':
main()
|
need-being/eink
|
draw_text.py
|
draw_text.py
|
py
| 940 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12940606913
|
import json
import logging
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
# Logging config
logging.basicConfig(
filename='writer.log',
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO
)
# read raw records from raw_data.txt
raw_data = []
try:
with open('raw_data.txt','r') as txt:
for raw_record in txt.readlines():
record = raw_record.strip()
raw_data.append(record)
except Exception as err:
logging.error(err)
# Load schema
try:
schema = avro.schema.parse(open("schema.avsc").read())
except Exception as err:
logging.error(err)
raise
# Write records
try:
with open('encoded_data.avro', 'wb') as f:
writer = DataFileWriter(f, DatumWriter(), schema)
for row in raw_data:
try:
writer.append(json.loads(row))
except Exception as err:
logging.error(err)
logging.info(row.strip())
writer.close()
except Exception as err:
logging.error(err)
raise
|
jocerfranquiz/avro_test
|
write_avro.py
|
write_avro.py
|
py
| 1,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74473909946
|
from datetime import date
from django.test import Client
from django.test import TestCase
from django.urls import resolve
from .views import index, mhs_name, calculate_age
class Lab1UnitTest(TestCase):
def test_hello_name_is_exist(self):
response = Client().get('/lab-1/')
self.assertEqual(response.status_code, 200)
def test_using_index_func(self):
found = resolve('/lab-1/')
self.assertEqual(found.func, index)
def test_name_is_changed(self):
response = Client().get('/lab-1/')
self.assertTemplateUsed(response, 'lab_9/session/login.html')
session = self.client.session
session['user_login'] = 'user'
session['kode_identitas'] = '123'
session.save()
response = self.client.get('/lab-1/')
html_response = response.content.decode('utf8')
self.assertIn('<title>' + mhs_name + '</title>', html_response)
self.assertIn('<h1>Hello my name is ' + mhs_name + '</h1>', html_response)
self.assertFalse(len(mhs_name) == 0)
def test_calculate_age_is_correct(self):
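        # note: the expected ages below assume the tests run in 2017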
self.assertEqual(0, calculate_age(date.today().year))
self.assertEqual(17, calculate_age(2000))
self.assertEqual(27, calculate_age(1990))
def test_index_contains_age(self):
response = Client().get('/lab-1/')
self.assertTemplateUsed(response, 'lab_9/session/login.html')
session = self.client.session
session['user_login'] = 'username'
session['kode_identitas'] = 'npm'
session.save()
response = self.client.get('/lab-1/')
html_response = response.content.decode('utf8')
self.assertRegex(html_response, r'<article>I am [0-9]\d+ years old</article>')
|
argaghulamahmad/ppw-lab-arga
|
lab_1/tests.py
|
tests.py
|
py
| 1,752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5687320564
|
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
import time
import numpy as np
import math
# P(t) = P(0) + V(0)*t + 1/2 * g * t^2  (kinematics reference; the demo below
# actually moves the balls on circular paths using sin/cos of elapsed time)
dt = -1
currentTime = 0
lastTime=0
def TimerOn() :
if dt>0 :
return True
else :
return False
def TimerStart():
global currentTime, lastTime, dt
if dt<0 :
currentTime = time.perf_counter()
lastTime = currentTime
def TimerGetDt():
global currentTime, lastTime, dt
currentTime = time.perf_counter()
dt = currentTime - lastTime
lastTime = currentTime
return dt
ball1 = np.array([0,0,0]) # P(0)
ball2 = np.array([0,0,0])
ball1Vy = np.array([0,5,0]) # V(0)
ball1Vz = np.array([0,0,5])
ball2Vx = np.array([10,0,0])
ball2Vz = np.array([0,0,5])
g = np.array([0, 0, 0])
et = 0.0
simulationStart = False
def GLinit() :
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(100, 100)
glutCreateWindow(b"Lab06-2:Moving with kinematics")
def RegisterCallbacks() :
glutDisplayFunc(draw)
glutIdleFunc(draw)
glutKeyboardFunc(key)
def key(k, x,y) : #To move ball, hit any key.
global simulationStart
simulationStart = True
def drawLine(x,y,z, xx,yy,zz) :
glBegin(GL_LINES)
glVertex3f(x,y,z)
glVertex3f(xx,yy,zz)
glEnd()
def drawBall(pos) :
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glutWireSphere(1.0, 10,10)
glPopMatrix()
def draw():
global ball1, ball2, ball1Vy, ball1Vz, ball2Vx, ball2Vz, g, dt, et
if TimerOn() != True :
TimerStart()
et = 0.0
dt = 0.0
glClear(GL_COLOR_BUFFER_BIT)
# Lens
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60, 1.0, 0.1, 1000.0)
# World
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(20,20,20, 0,0,0, 0,1,0)
glColor3f(1,0,0)
drawLine(-10, 0, 0, 10, 0, 0)
glColor3f(0, 1, 0)
drawLine(0, -10, 0, 0,10, 0)
glColor3f(0, 0, 1)
drawLine(0, 0, -10, 0, 0,10)
if simulationStart :
et = et + dt
P1 = ball1 + ball1Vy * math.sin(et) + ball1Vz * math.cos(et)
P2 = ball2 + ball2Vx * math.sin(et) + ball2Vz * math.cos(et)
glColor3f(1, 1, 0)
drawBall(P1)
glColor3f(0, 1, 1)
drawBall(P2)
else:
glColor3f(1, 1, 0)
drawBall(np.array([0,0,5]))
glColor3f(0, 1, 1)
drawBall(np.array([0,0,5]))
glFlush()
dt = TimerGetDt()
GLinit()
RegisterCallbacks()
glutMainLoop()
# End of program
|
asd147asd147/High_Quality_OpenGL
|
OpenGL/lab06/lab06-2-m.py
|
lab06-2-m.py
|
py
| 2,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28521323695
|
"""change precision of order amount
Revision ID: fb84527fc0b3
Revises: 5c6f2d25c2f0
Create Date: 2018-04-07 18:51:12.160012+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'fb84527fc0b3'
down_revision = '5c6f2d25c2f0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('order', 'amount',
existing_type=mysql.DECIMAL(precision=2, scale=0),
type_=sa.DECIMAL(precision=10),
existing_nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('order', 'amount',
existing_type=sa.DECIMAL(precision=10),
type_=mysql.DECIMAL(precision=2, scale=0),
existing_nullable=False)
# ### end Alembic commands ###
|
harveyslash/backend-cleaned
|
beatest/migrations/versions/20180407185112_fb84527fc0b3_change_type_of_order_amount.py
|
20180407185112_fb84527fc0b3_change_type_of_order_amount.py
|
py
| 1,011 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13194360101
|
# -*-coding=utf-8-*-
# @Time : 2019/1/28 14:19
# @File : youtube_downloader.py
import subprocess
import sys
import pymongo
import re
import codecs
def extract_link(filename='web.html'):
with codecs.open(filename, 'r', encoding='utf8') as f:
content = f.read()
try:
        result = re.findall(r'\{"videoId":"(\w+)"\}', content)
except Exception as e:
return []
else:
return result
# download links listed in a local text file
def download_from_txt():
db = pymongo.MongoClient('10.18.6.46', port=27001)
doc = db['db_rocky']['youtube']
CMD = 'python you-get {}'
while 1:
with open('youtube_link.txt', 'r') as f:
lines = f.readlines()
lines_copy = lines.copy()
if not lines:
break
        for line in lines:  # iterate the original list; lines_copy is mutated below
print(line.strip())
# line=line.strip()
is_exists = doc.find({'url': line.strip()})
if list(is_exists):
                print('{} already exists!'.format(line))
lines_copy.remove(line)
else:
try:
p = subprocess.Popen(CMD.format(line.strip()), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
output, error = p.communicate()
except Exception as e:
print(e)
continue
else:
lines_copy.remove(line)
print('{} has been downloaded !'.format(line))
try:
doc.insert({'url': line.strip()})
except Exception as e:
print(e)
with open('youtube_link.txt', 'w') as f:
f.writelines(lines_copy)
def download_from_web():
db = pymongo.MongoClient('10.18.6.46', port=27001)
doc = db['db_rocky']['youtube']
id_list = extract_link()
base_url = 'https://www.youtube.com/watch?v={}'
for idx in id_list:
full_url = base_url.format(idx)
cmd='python you-get {}'.format(full_url)
is_exists = doc.find({'url': full_url})
# if list(is_exists):
        #     print('already downloaded >>>> {}'.format(full_url))
# continue
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
output, error = p.communicate()
except Exception as e:
print(e)
continue
else:
            print('{} downloaded!'.format(full_url))
try:
doc.insert({'url': full_url})
except Exception as e:
print(e)
function_map = {'1': download_from_txt,
                '2': download_from_web}
option = sys.argv[1]
function_map.get(option)()
print('Done')
|
leegb/online_video_download
|
youtube_downloader.py
|
youtube_downloader.py
|
py
| 2,952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19528069970
|
# -*- coding: utf-8 -*-
# Basic shared utilities module
__author__='zhaicao'
import pymssql
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtCore import Qt
import winreg
import os
import sys
import webbrowser
# SQL Server access class
class MSSQL:
def __init__(self,**kwargs):
self.dbInfo = kwargs
def __GetConnect(self):
self.conn = pymssql.connect(**self.dbInfo,charset = "utf8")
cur = self.conn.cursor()
if not cur:
            raise NameError("failed to connect to the database")
else:
return cur
    # return query results
def ExecQuery(self,sql):
cur = self.__GetConnect()
cur.execute(sql)
resList = cur.fetchall()
self.conn.close()
return resList
    # execute a SQL statement
def ExecNonQuery(self,sql):
cur = self.__GetConnect()
cur.execute(sql)
self.conn.commit()
self.conn.close()
# Object repository: wraps basic lookups/operations on widgets
class ObjRepository(object):
def __init__(self, widgetObj, *objDict):
self.__widgetObj = widgetObj
self.__objDict = dict()
        # merge the dicts
for i in objDict:
self.__objDict = dict(self.__objDict,**i)
def getObjByName(self, objName):
return self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
def getObjTextByName(self, objName):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
if isinstance(obj, QtWidgets.QComboBox):
return obj.currentText()
elif isinstance(obj, QtWidgets.QLineEdit):
return obj.text()
elif isinstance(obj, QtWidgets.QCheckBox):
return obj.checkState() == Qt.Checked
else:
return None
    # get a bool from a combo box (only supports two-option combos)
def getObjBoolByName(self, objName):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
return bool(obj.currentIndex())
def getWidgetObj(self):
return self.__widgetObj
def setObjEnabled(self, objName, state):
self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName).setEnabled(state)
def getTextByObj(self, obj):
if isinstance(obj, QtWidgets.QComboBox):
return obj.currentText()
elif isinstance(obj, QtWidgets.QLineEdit):
return obj.text()
elif isinstance(obj, QtWidgets.QCheckBox):
return obj.checkState() == Qt.Checked
else:
return None
def setObjTextByName(self, objName, text):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
if isinstance(obj, QtWidgets.QLineEdit):
obj.setText(text)
# General-purpose utility class
class Util(object):
    # show a message box with custom buttons
@classmethod
def mesInfomation(self, widgetObj, message,title = '提示', *args):
mesbox = QtWidgets.QMessageBox(widgetObj)
mesbox.setWindowTitle(title)
mesbox.setText(message)
        # add the provided buttons, or a default one
if len(args) == 0:
mesbox.addButton('好', QtWidgets.QMessageBox.ActionRole)
else:
for i in args:
mesbox.addButton(i, QtWidgets.QMessageBox.ActionRole)
mesbox.exec_()
return mesbox
@classmethod
def writeFile(cls, filepath, fileData, connector=':'):
f = open(filepath, 'w')
try:
for i, k in fileData.items():
f.write('%s%s %s' % (str(i), str(connector), str(k)) + '\n')
except Exception as e:
print(e)
return False
finally:
f.close()
return True
    # convert a config dict to a string
@classmethod
def dictTransforStr(cls, confList, connector=':'):
reStr = ''
for k,v in confList.items():
reStr += '%s%s %s' % (str(k), str(connector), str(v)) + '\n'
return reStr
    # get the Windows desktop path
@classmethod
def getWinDesktop(cls):
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders', )
return winreg.QueryValueEx(key, "Desktop")[0]
    # copy text to the clipboard
@classmethod
def copyClipboardText(self, Text):
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(str(Text))
    # copy an image to the clipboard
@classmethod
def copyClipboardImage(self, image):
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setPixmap(QtGui.QPixmap(os.path.join(os.path.dirname(__file__), image)))
    # copy HTML to the clipboard
@classmethod
def copyHtml(self, html):
mimeData = QtCore.QMimeData()
mimeData.setHtml(html)
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setMimeData(mimeData)
    # open the given URL
    # if path is None, use the default browser
    # if the application browser cannot be found, fall back to the default browser
    @classmethod
    def openUrl(self, url, path=None):
        if path:
            chromePath = r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
            if os.path.exists(chromePath):
                webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chromePath))
                webbrowser.get('chrome').open(url, new=1, autoraise=True)
            else:
                webbrowser.open(url, new=1, autoraise=True)
        else:
            webbrowser.open(url, new=1, autoraise=True)
    # convert a relative path to an absolute path
    # paths: directory components under the application directory (variadic)
    # returns the absolute path
@classmethod
def getAbsPath(self, *paths):
if getattr(sys, 'frozen', False):
dir = os.path.dirname(sys.executable)
elif __file__:
dir = os.path.dirname(__file__)
return os.path.join(dir, *paths)
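    # e.g. Util.getAbsPath('conf', 'app.ini') -> <app dir>/conf/app.ini (illustrative)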
    # append a line to the log file
@classmethod
def log(self, context):
import codecs
with codecs.open(self.getAbsPath('log.txt'), 'a', 'gbk') as file:
file.write(context)
file.write('\n')
if __name__ == '__main__':
    # a QApplication instance is required before the clipboard can be used
    app = QtWidgets.QApplication(sys.argv)
    a = "python"
    Util.copyClipboardText(a)
    print(QtWidgets.QApplication.clipboard().text())
|
zhaicao/pythonWorkspace
|
DeployTool/eventAction/Utils.py
|
Utils.py
|
py
| 6,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25875757160
|
from numba import jit
import numpy as np
from obspy.taup import TauPyModel
import os
@jit(nopython=True, fastmath=True)
def coords_lonlat_rad_bearing(lat1, lon1, dist_deg, brng):
"""
    Returns the latitude and longitude of a new coordinate that is the defined
    distance away from the starting point, along the given bearing.
    Parameters
    ----------
    lat1 : float
        Starting point latitude.
    lon1 : float
        Starting point longitude.
    dist_deg : float
        Distance from starting point in degrees.
    brng : float
        Angle from north describing the direction where the new coordinate is located.
    Returns
    -------
    lat2 : float
        Latitude of the new coordinate.
    lon2 : float
        Longitude of the new coordinate.
"""
brng = np.radians(brng) # convert bearing to radians
d = np.radians(dist_deg) # convert degrees to radians
lat1 = np.radians(lat1) # Current lat point converted to radians
lon1 = np.radians(lon1) # Current long point converted to radians
lat2 = np.arcsin(
(np.sin(lat1) * np.cos(d)) + (np.cos(lat1) * np.sin(d) * np.cos(brng))
)
lon2 = lon1 + np.arctan2(
np.sin(brng) * np.sin(d) * np.cos(lat1), np.cos(d) - np.sin(lat1) * np.sin(lat2)
)
lat2 = np.degrees(lat2)
lon2 = np.degrees(lon2)
# lon2 = np.where(lon2 > 180, lon2 - 360, lon2)
# lon2 = np.where(lon2 < -180, lon2 + 360, lon2)
if lon2 > 180:
lon2 -= 360
elif lon2 < -180:
lon2 += 360
else:
pass
return lat2, lon2
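# quick check (illustrative): coords_lonlat_rad_bearing(0, 0, 90, 90) is
# approximately (0.0, 90.0), i.e. 90 degrees due east of the origin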
@jit(nopython=True, fastmath=True)
def haversine_deg(lat1, lon1, lat2, lon2):
"""
Function to calculate the distance in degrees between two points on a sphere.
Parameters
----------
    lat1 : float
        Latitude of point 1.
    lon1 : float
        Longitude of point 1.
    lat2 : float
        Latitude of point 2.
    lon2 : float
        Longitude of point 2.
Returns
-------
d : float
Distance between the two points in degrees.
"""
dlat = np.radians(lat2 - lat1)
dlon = np.radians(lon2 - lon1)
a = (np.sin(dlat / 2)) ** 2 + np.cos(np.radians(lat1)) * np.cos(
np.radians(lat2)
) * (np.sin(dlon / 2)) ** 2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
d = np.degrees(c)
return d
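# quick check (illustrative): haversine_deg(0, 0, 0, 90) -> 90.0
# (two points on the equator separated by a quarter of the circumference)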
def deg_km_az_baz(lat1, lon1, lat2, lon2):
"""
    Function to return the distances in degrees and km over a spherical Earth
with the backazimuth and azimuth. Distances calculated using the haversine
formula.
Parameters
----------
lat(1/2) : float
Latitude of point (1/2)
lon(1/2) : float
Longitude of point (1/2)
Returns
-------
dist_deg : float
Distance between points in degrees.
    dist_km : float
        Distance between points in km.
az : float
Azimuth at location 1 pointing to point 2.
baz : float
        Backazimuth at location 2 pointing to point 1.
"""
# use haversine formula to get distance in degrees and km
R = 6371
dist_deg = haversine_deg(lat1, lon1, lat2, lon2)
dist_km = np.radians(dist_deg) * R
az = np.degrees(
np.arctan2(
(np.sin(np.radians(lon2 - lon1)) * np.cos(np.radians(lat2))),
np.cos(np.radians(lat1)) * np.sin(np.radians(lat2))
- np.sin(np.radians(lat1))
* np.cos(np.radians(lat2))
* np.cos(np.radians(lon2 - lon1)),
)
)
# baz=np.degrees(np.arctan2((np.sin(np.radians(lon1-lon2))*np.cos(np.radians(lat1))), np.cos(np.radians(lat2))*np.sin(np.radians(lat1)) - np.sin(np.radians(lat2))*np.cos(np.radians(lat1))*np.cos(np.radians(lon1-lon2)) ))
dLon = np.radians(lon1 - lon2)
y = np.sin(dLon) * np.cos(np.radians(lat1))
x = np.cos(np.radians(lat2)) * np.sin(np.radians(lat1)) - np.sin(
np.radians(lat2)
) * np.cos(np.radians(lat1)) * np.cos(dLon)
baz = np.arctan2(y, x)
baz = np.degrees(baz)
if baz < 0:
baz = (baz + 360) % 360
return dist_deg, dist_km, az, baz
def relocate_event_baz_slow(evla, evlo, evdp, stla, stlo, baz, slow, phase, mod='prem'):
"""
Given event location, mean station location and slowness vector
(baz and slow), relocate the event so the ray arrives with the
slowness and backazimuth.
    Parameters
    ----------
evla : float
Event latitude.
evlo : float
Event longitude.
evdp : float
Event depth.
stla : float
Station latitude.
stlo : float
Station longitude.
baz : float
Backazimuth of slowness vector.
slow : float
Horizontal slowness of slowness vector.
phase : string
Target phase (e.g. SKS).
mod : string
1D velocity model to use (default is PREM).
Returns
-------
new_evla : float
Relocated event latitude.
new_evlo : float
Relocated event longitude.
"""
model = TauPyModel(model=mod)
dist_deg = haversine_deg(lat1=evla, lon1=evlo, lat2=stla, lon2=stlo)
# define distances to search over
dist_min=dist_deg-30
dist_max=dist_deg+30
dist_search = np.linspace(dist_min, dist_max, 1000)
diff_slows = np.ones(dist_search.shape)
# if the difference just keeps increasing
# stop after 20 increases
early_stop_count = 0
for i,test_distance in enumerate(dist_search):
try:
## work out slowness and compare to the observed slowness
tap_out_test = model.get_travel_times(source_depth_in_km=float(evdp),
distance_in_degree=float(test_distance),
receiver_depth_in_km=0.0,
phase_list=[phase])
abs_slow_test = tap_out_test[0].ray_param_sec_degree
diff_slow = abs(abs_slow_test - slow)
## work out slowness and compare to the observed slowness
diff_slows[i] = diff_slow
            if i > 0 and diff_slow > diff_slows[i-1]:
early_stop_count +=1
else:
early_stop_count = 0
if early_stop_count > 20:
                print('residual increasing for more than 20 iterations; breaking loop')
break
except:
pass
    min_diff = np.amin(diff_slows)
    loc = np.where(diff_slows == min_diff)[0][0]
    distance_at_slowness = dist_search[loc]
new_evla, new_evlo = coords_lonlat_rad_bearing(lat1 = stla,
lon1 = stlo,
dist_deg = distance_at_slowness,
brng = baz)
return new_evla, new_evlo
def predict_pierce_points(evla, evlo, evdp, stla, stlo, phase, target_depth, mod='prem'):
"""
Given station and event locations, return the pierce points at a particular
depth for source or receiver side locations.
Parameters
----------
evla : float
Event latitude.
evlo : float
Event longitude.
evdp : float
Event depth.
stla : float
Station latitude.
stlo : float
Station longitude.
phase : string
Target phase
target_depth : float
Depth to calculate pierce points.
mod : string
1D velocity model to use (default is PREM).
Returns
-------
    s_pierce_la : float
        Source pierce point latitude.
    s_pierce_lo : float
        Source pierce point longitude.
    r_pierce_la : float
        Receiver pierce point latitude.
    r_pierce_lo : float
        Receiver pierce point longitude.
"""
    # I don't like the obspy taup pierce interface, so we call the TauP Java
    # tool through the shell instead.
    # This assumes you have TauP installed:
# https://github.com/crotwell/TauP/
# print(f"taup_pierce -mod {mod} -h {evdp} -sta {stla} {stlo} -evt {evla} {evlo} -ph {phase} --pierce {target_depth} --nodiscon > ./temp.txt")
os.system(f"taup_pierce -mod {mod} -h {evdp} -sta {stla} {stlo} -evt {evla} {evlo} -ph {phase} --pierce {target_depth} --nodiscon > ./temp.txt")
    # read the taup_pierce output
    with open("./temp.txt", 'r') as temp_file:
        lines = temp_file.readlines()
    number_of_lines = len(lines)
if number_of_lines == 2:
print(f"Only pierces depth {target_depth} once.")
print(f"Writing this one line to the file.")
source_line = lines[-1]
receiver_line = lines[-1]
elif number_of_lines == 3:
source_line = lines[1]
receiver_line = lines[-1]
elif number_of_lines > 3:
print(f"Phase {phase} pierces depth {target_depth} more than twice.")
print(f"Writing pierce point closest to source/receiver")
source_line = lines[1]
receiver_line = lines[-1]
if number_of_lines != 0:
s_dist, s_pierce_depth, s_time, s_pierce_la, s_pierce_lo = source_line.split()
r_dist, r_pierce_depth, r_time, r_pierce_la, r_pierce_lo = receiver_line.split()
else:
print('Neither the phase nor ScS can predict this arrival, not continuing')
s_pierce_la = 'nan'
s_pierce_lo = 'nan'
r_pierce_la = 'nan'
r_pierce_lo = 'nan'
# os.remove("./temp.txt")
return s_pierce_la, s_pierce_lo, r_pierce_la, r_pierce_lo
|
eejwa/Array_Seis_Circle
|
circ_array/geo_sphere_calcs.py
|
geo_sphere_calcs.py
|
py
| 9,707 |
python
|
en
|
code
| 7 |
github-code
|
6
|
71804916988
|
from __future__ import print_function
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from config import params, data
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(params["MAX_SENT_LEN"], 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, params["MAX_SENT_LEN"])
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def encode(self, x):
h1 = self.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
# print(logvar)
# print(mu)
# print(self.training)
# if self.training:
# print(logvar)
# std = logvar.mul(0.5).exp_()
# eps = Variable(std.data.new(std.size()).normal_())
# # print(eps)
# # print(eps)
# print(std)
# a = eps.mul(std)
# # print(a)
# # a = eps.mul(std).add_(mu)
# # print(a)
# return a
# else:
# # print(mu)
# print(mu)
return mu
def decode(self, z):
h3 = self.relu(self.fc3(z))
# print(h3)
return self.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 59))
z = self.reparameterize(mu, logvar)
# print(z)
return self.decode(z), mu, logvar
def loss_function(recon_x, x, mu, logvar):
# print(recon_x)
# print(x)
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 59))
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
# Normalise by same number of elements as in reconstruction
# KLD /= params["BATCH_SIZE"] * 59
KLD = 0
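    # NOTE: with KLD fixed at 0 (and reparameterize returning mu), this
    # effectively trains a plain autoencoder rather than a VAE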
return BCE + KLD
def train(model):
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(params["EPOCH"]):
for i in range(0, len(data["train_x"]), params["BATCH_SIZE"]):
batch_range = min(params["BATCH_SIZE"], len(data["train_x"]) - i)
batch_x = [[data["word_to_idx"][w] for w in sent] +
[params["VOCAB_SIZE"] + 1] *
(params["MAX_SENT_LEN"] - len(sent))
for sent in data["train_x"][i:i + batch_range]]
model.train()
train_loss = 0
feature = Variable(torch.FloatTensor(batch_x))
if params["CUDA"]:
feature = feature.cuda()
optimizer.zero_grad()
recon_batch, mu, logvar = model(feature)
# print(recon_batch)
loss = loss_function(recon_batch, feature, mu, logvar)
loss.backward()
train_loss += loss.data[0]
optimizer.step()
print('Train Epoch: {} \tLoss: {:.6f}'.format(
epoch,loss.data[0] / len(feature)))
def test(model):
    model.eval()
    test_loss = 0
    for i in range(0, len(data["test_x"]), params["BATCH_SIZE"]):
        batch_range = min(params["BATCH_SIZE"], len(data["test_x"]) - i)
        batch_x = [[data["word_to_idx"][w] for w in sent] +
                   [params["VOCAB_SIZE"] + 1] *
                   (params["MAX_SENT_LEN"] - len(sent))
                   for sent in data["test_x"][i:i + batch_range]]
        # use a local name; assigning to `data` would shadow the imported dict
        feature = Variable(torch.FloatTensor(batch_x), volatile=True)
        if params["CUDA"]:
            feature = feature.cuda()
        recon_batch, mu, logvar = model(feature)
        test_loss += loss_function(recon_batch, feature, mu, logvar).data[0]
    test_loss /= len(data["test_x"])
    print('====> Test set loss: {:.4f}'.format(test_loss))
|
hoxmark/Deep_reinforcement_active_learning
|
selection_strategies/models/vae.py
|
vae.py
|
py
| 3,987 |
python
|
en
|
code
| 17 |
github-code
|
6
|
72313218748
|
from flask import Flask, jsonify,request,json
from scrapper import scrap_cards
from config import *
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/scrap', methods = ['POST'])
def generate_json():
req = request.get_json(force=True)
print(req['url'])
scrap_cards(req['url'])
data = json.load(open(JSON_FOLDER+'output.json'))
return jsonify(data)
if __name__ == '__main__':
app.run()
|
mage1711/flask-scrapers-api
|
app.py
|
app.py
|
py
| 495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9690266532
|
"""
Xero Linked Transactions API
"""
from .api_base import ApiBase
class LinkedTransactions(ApiBase):
"""
Class for Linked Transactions API
"""
POST_LINKED_TRANSACTION = '/api.xro/2.0/LinkedTransactions'
def post(self, data):
"""
Create new invoice
Parameters:
data (dict): Data to create invoice
Returns:
Response from API
"""
return self._post_request(data, LinkedTransactions.POST_LINKED_TRANSACTION)
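# Illustrative usage (a sketch; authentication and request wiring are handled by ApiBase):
# lt = LinkedTransactions()
# lt.post({...})  # payload fields follow Xero's LinkedTransactions schema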
|
fylein/xero-sdk-py
|
xerosdk/apis/linked_transactions.py
|
linked_transactions.py
|
py
| 505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20143445172
|
"""Views for Learning Journal."""
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
from datetime import datetime
from anna_journal.models import Journals
from pyramid.security import remember, forget
from anna_journal.security import check_credentials
@view_config(route_name='login', renderer='../templates/login.jinja2')
def login(request):
"""The login in view for our admin."""
if request.method == 'POST':
username = request.params.get('username', '')
password = request.params.get('password', '')
if check_credentials(username, password):
headers = remember(request, username)
return HTTPFound(location=request.route_url('list_view'), headers=headers)
return {}
@view_config(route_name='logout')
def logout(request):
headers = forget(request)
return HTTPFound(request.route_url('list_view'), headers=headers)
@view_config(route_name='list_view', renderer='../templates/index.jinja2')
def list_view(request):
"""Display list of journal entries."""
JOURNALS = request.dbsession.query(Journals).all()
return {
'journals': JOURNALS
}
@view_config(route_name='detail_view', renderer='../templates/detail.jinja2')
def detail_view(request):
"""View single journal entry."""
entry_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journals).get(entry_id)
return {
'entry': entry
}
@view_config(route_name='create_view', renderer='../templates/form.jinja2', permission='secret', require_csrf=True)
def create_view(request):
"""Create a new view."""
if request.method == "POST" and request.POST:
if request.POST['title'] and request.POST['body']:
form_data = request.POST
new_entry = Journals(
title=form_data['title'],
body=form_data['body'],
creation_date=datetime.now(),
)
request.dbsession.add(new_entry)
return HTTPFound(location=request.route_url('list_view'))
return request.POST
@view_config(
route_name='update_view', renderer='../templates/form_edit.jinja2', permission='secret', require_csrf=True)
def update_view(request):
"""Update an existing view."""
entry_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journals).get(entry_id)
if not entry:
        return HTTPNotFound()
if request.method == "GET":
return {
'title': entry.title,
'body': entry.body
}
if request.method == "POST":
form_data = request.POST
entry.title = form_data['title']
entry.body = form_data['body']
request.dbsession.flush()
return HTTPFound(location=request.route_url('detail_view', id=entry_id))
|
Bonanashelby/pyramid-learning-journal
|
anna_journal/anna_journal/views/default.py
|
default.py
|
py
| 2,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19054013888
|
"""
Scripts to align sequences and transoform them into 1-hot encoding
"""
# Author: Alessio Milanese <[email protected]>
import shutil
import time
import subprocess
import shlex
import os
import errno
import sys
import tempfile
import numpy as np
import re
from stag.helpers import is_tool, read_fasta
#===============================================================================
# FUNCTIONS
#===============================================================================
# ------------------------------------------------------------------------------
# function to convert the nucleotide alignment into 1-hot encoding.
# Note that we select only the nucleotides that corresponds to the inner state
# of the HMM.
encoding_dic = {
"A": [0, 0, 0, 0, 1],
"C": [0, 0, 0, 1, 0],
"G": [0, 0, 1, 0, 0],
"T": [0, 1, 0, 0, 0],
"U": [0, 1, 0, 0, 0],
"others": [1, 0, 0, 0, 0]
}
def convert_alignment(alignment, verbose, as_numpy=False):
n_aligned_characters, n_char = 0, 0
converted_ali = list()
for character in alignment:
# 1-hot encoding
# the ACGTU are converted, everything else that is upper case, is considered
# as a gap ('-').
# for example also 'N' is converted to "-" -> "1,0,0,0,0"
# Note that the upper case letters and "-" represents alignment to the
# hidden state of the HMM.
if not character.islower():
n_char += 1
            encoded_block = encoding_dic.get(character)
            if encoded_block:
                # recognised nucleotide (A, C, G, T, U)
                n_aligned_characters += 1
            else:
                # gaps and ambiguity codes (e.g. '-', 'N') map to "others"
                encoded_block = encoding_dic["others"]
converted_ali.extend(encoded_block)
#if as_numpy:
# converted_ali = np.array(list(map(bool, converted_ali)), dtype=bool)
return np.array(converted_ali, dtype=bool), n_aligned_characters / n_char * 100
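# Illustrative example (not in the original module): for the aligned fragment "AC-g",
# 'A' -> 00001, 'C' -> 00010, '-' -> 10000 (counted but not aligned), and the
# lower-case 'g' (an insert state) is skipped, giving 2/3 aligned characters.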
# function that transform a protein MSA to a nucleotide MSA --------------------
# if check_length is True, then we check that
# len(protein) == len(gene)*3 OR len(protein)-3 == len(gene)*3
def protein2gene_alignment(gene_id, protein_alignment, gene_sequence, check_length=True):
# check that the length is correct
only_AA_from_ali = re.sub(r'\-', '', protein_alignment)
if check_length:
expected_gene_length = len(only_AA_from_ali) * 3
# check if lengths of gene and protein sequence match, with or without stop codon
if len(gene_sequence) != expected_gene_length and len(gene_sequence) - 3 != expected_gene_length:
sys.stderr.write("Error, length of genes/alignment is not correct")
sys.stderr.write(" (protein: "+str(len(only_AA_from_ali)*3)+", gene: "+str(len(gene_sequence))+")\n")
sys.stderr.write(" ID: "+gene_id+"\n")
return None
# convert alignment
pos_gene, al_gene = 0, list()
for res in protein_alignment:
found = False
if res == "-":
al_gene.append("---")
found = True
elif res.isupper():
al_gene.extend(gene_sequence[pos_gene:pos_gene + 3])
pos_gene += 3
found = True
elif res.islower():
found = True
# since we have to remove the lower case letter, we do not
# add those to the alignment, but we anyway increase pos_gene
pos_gene += 3
if not found:
sys.stderr.write("Error, character not identified\n")
return "".join(al_gene)
# ------------------------------------------------------------------------------
# main function as a generator
def align_generator(seq_file, protein_file, hmm_file, use_cmalign, n_threads, verbose, return_numpy, min_perc_state):
"""Align sequences and transform them into 1-hot encoding, ready for
classification.
Parameters
----------
seq_file: file with the nucleotide sequences [string]
protein_file: file with the protein sequences [string or None]
hmm_file: file with the hmm model [string]
use_cmalign: if True, we use cmalign. If false, we use hmmalign [bool]
n_threads: number of threads to use for cmalign (hmmalign can run only
on one thread) [string/int]
verbose: how much info to print [int]
return_numpy: True if you want to return a numpy array instead of a string
Returns
-------
Returns a generator with:
(fasta_id, aligned_sequence) tuples
"""
    # number of sequences that pass and don't pass the filter
n_pass, n_not_pass = 0, 0
# check that the tools are available
if use_cmalign and not is_tool("cmalign"):
raise ValueError("[E::align] Error: cmalign is not in the path. Please install Infernal.")
elif not is_tool("hmmalign"):
raise ValueError("[E::align] Error: hmmalign is not in the path. Please install HMMER3.")
if not is_tool("esl-reformat"):
raise ValueError("[E::align] Error: esl-reformat is not in the path. Please install Easel.")
aligner = f"cmalign --cpu {n_threads}" if use_cmalign else "hmmalign"
seq_input = protein_file if protein_file else seq_file
align_cmd = f"{aligner} {hmm_file} {seq_input}"
if verbose > 4:
print(f"Command used to align the sequences: {align_cmd}", file=sys.stderr)
# run the command
CMD = shlex.split(align_cmd)
align_cmd = subprocess.Popen(CMD, stdout=subprocess.PIPE,)
# command to parse the alignment from STOCKHOLM to fasta format
cmd2 = "esl-reformat a2m -"
CMD2 = shlex.split(cmd2)
parse_cmd = subprocess.Popen(CMD2, stdin=align_cmd.stdout, stdout=subprocess.PIPE,)
if protein_file:
seq_stream = zip(read_fasta(parse_cmd.stdout, head_start=1),
read_fasta(open(seq_file), is_binary=False, head_start=1))
else:
seq_stream = read_fasta(parse_cmd.stdout, head_start=1)
for item in seq_stream:
if protein_file:
(pid, pseq), (gid, gseq) = item
if pid != gid:
sys.stderr.write("[E::align] Error. protein and gene identifiers {} {} don't match.".format(pid, gid))
sys.exit(1)
gseq = protein2gene_alignment(gid, pseq, gseq, check_length=True)
else:
gid, gseq = item
converted_ali, perc_aligned_characters = convert_alignment(gseq, verbose, as_numpy=return_numpy)
if perc_aligned_characters >= min_perc_state:
n_pass += 1
yield gid, converted_ali
else:
n_not_pass += 1
# check that hmmalign/cmalign finished correctly
align_cmd.stdout.close()
return_code = align_cmd.wait()
if return_code:
raise ValueError("[E::align] Error. hmmalign/cmalign failed.")
# check that converting the file worked correctly
parse_cmd.stdout.close()
return_code = parse_cmd.wait()
if return_code:
raise ValueError("[E::align] Error. esl-reformat failed.")
# print the number of sequences that were filtered
if verbose > 3:
print(f" Number of sequences that pass the filter: {n_pass}", file=sys.stderr)
print(f" Number of sequences that do not pass the filter: {n_not_pass}", file=sys.stderr)
# ------------------------------------------------------------------------------
# main function
def align_file(seq_file, protein_file, hmm_file, use_cmalign, n_threads, verbose, res_file, min_perc_state):
"""Align sequences and transform them into 1-hot encoding, ready for
classification.
Parameters
----------
seq_file : file with the nucleotide sequences [string]
protein_file: file with the protein sequences [string or None]
hmm_file : file with the hmm model [string]
use_cmalign : if True, we use cmalign. If false, we use hmmalign [bool]
n_threads: number of threads to use for cmalign (hmmalign can run only
on one thread) [string/int]
verbose: how much info to print [int]
res_file: where to save the result.
Returns
-------
It will save the aligned sequences to the specified file.
"""
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w")
os.chmod(temp_file.name, 0o644)
with temp_file:
for gid, ali in align_generator(seq_file, protein_file, hmm_file, use_cmalign,
n_threads, verbose, False, min_perc_state):
print(gid, *map(int, ali), sep="\t", file=temp_file)
try:
temp_file.flush()
os.fsync(temp_file.fileno())
except:
raise ValueError("[E::align] Error when saving the resulting file.")
try:
shutil.move(temp_file.name, res_file)
except:
raise ValueError(f"[E::align] The resulting file couldn't be saved. You can find the file here:\n{temp_file.name}.")
|
zellerlab/stag
|
stag/align.py
|
align.py
|
py
| 8,980 |
python
|
en
|
code
| 7 |
github-code
|
6
|
19672334600
|
import pytest
from bfprt.algo import insertion_sort, partition, select, swap
class TestInternal:
def test_swap(self):
items = [4, 1, 2, 5, 9, 8]
swap(items, 2, 3)
assert items == [4, 1, 5, 2, 9, 8]
@pytest.mark.parametrize("items, pivot_index, expected_items, expected_index", [
([4, 2, 1, 9, 5, 8], 0, [2, 1, 4, 9, 5, 8], 2),
([4, 2, 1, 9, 5, 8], 4, [4, 2, 1, 5, 8, 9], 3),
([2, 1], 0, [1, 2], 1),
([2, 1], 1, [1, 2], 0),
([3, 2, 1], 1, [1, 2, 3], 1),
])
def test_partition(self, items, pivot_index, expected_items, expected_index):
pivot_index = partition(items, 0, len(items) - 1, pivot_index)
assert pivot_index == expected_index
assert items == expected_items
def test_select(self):
for i in range(6):
items = [4, 2, 1, 9, 5, 8]
selected = select(items, 0, 5, i)
sorted = [1, 2, 4, 5, 8, 9]
assert selected == sorted[i]
def test_insertion_sort(self):
items = [4, 2, 9, 5, 8]
insertion_sort(items, 0, 4)
assert items == [2, 4, 5, 8, 9]
|
gregorybchris/bfprt
|
tests/test_internal.py
|
test_internal.py
|
py
| 1,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13955129653
|
import threading
class BuckysMessenger(threading.Thread):
# 'run' is a special thread function
def run(self):
# use the '_' if you just want to loop 10 times and don't care about variable
for _ in range (10):
print(threading.currentThread().getName())
x = BuckysMessenger(name='Send out messages')
y = BuckysMessenger(name='Receive messages')
# start() spawns the thread, which then invokes the class's 'run' method
x.start()
y.start()
|
eswartzendruber1/linux_setup
|
bin/src/sandbox/vid_34.py
|
vid_34.py
|
py
| 477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29259984179
|
import os
import sys
from datetime import datetime
from glob import glob
from re import split
from numpy import asarray, savetxt
class iGrav:
    # find all the .tsf files inside the input directory (including subdirectories)
def get_all_tfs(self, input_folder):
paths_list = glob(input_folder + "/**/*.tsf", recursive=True)
if len(paths_list) <= 0:
print("[ERROR]: There's no .tsf file")
sys.exit()
else:
return paths_list
# from a .tsf file get the header with channels and units measured by the device
def get_header(self, path):
with open(path, "r") as file:
header = []
content = file.readlines()
for i, line in enumerate(content):
if "[CHANNELS]" in line: # get all the channels that the device measure
start_idx = i + 1
end_idx = start_idx
while len(content[end_idx]) > 1:
channel = split(r":", content[end_idx].strip())[-1]
end_idx += 1
header.append(channel)
if "[UNITS]" in line: # get measure units and add to the header
counter = 0
header_len = len(header)
start_idx = i + 1
end_idx = start_idx
while len(content[end_idx]) > 1 and counter < header_len:
unit = content[end_idx].strip()
header[counter] = f"{header[counter]} ({unit})"
counter += 1
end_idx += 1
timestamp = header[-1]
header.pop(-1)
header.insert(0, timestamp)
return header
# from a .tsf file get only the content without the header
def get_content(self, path):
with open(path, "r") as file:
content = file.readlines()
for i, line in enumerate(content):
if "[DATA]" in line:
start_idx = i + 1
while len(content[start_idx]) <= 1:
start_idx += 1
return content[start_idx:]
# process the file and write the content in CSV format in the output file
def process(self, file_path, output_path):
output_path = self.get_output_path(file_path, output_path)
header = self.get_header(file_path)
self.append_row_in_file(header, output_path) # add header in the output csv file
content = self.get_content(file_path)
last_dt = None
for line in content:
data = self.data_row_validator(line)
            if data is not None:
date = self.format_datetime(data[0])
columns = data[1:]
if last_dt is None or (last_dt is not None and (last_dt - date).total_seconds() <= -1):
last_dt = date
self.append_row_in_file([date, *columns], output_path)
    # validate each content line; drop NUL-corrupted rows and rows without a parseable datetime
    def data_row_validator(self, row):
        if "\x00" not in row:
            data = split(r"\s{2,}", row.strip())
            if data[0] != "" and data[0] != None:
                try:
                    self.format_datetime(data[0])  # reject rows whose datetime cannot be parsed
                    return data
                except Exception as e:
                    print(f"[ERROR]: Error on formatting date | {e}")
        return None
# append a array in a file using CSV format with numpy
def append_row_in_file(self, data, output_file):
output = asarray([[str(item) for item in data]])
with open(output_file, "a") as file:
savetxt(file, output, fmt="%s", delimiter=",", newline="\n")
# reformat the datetime in YYYY-MM-DD HH:mm:ss
def format_datetime(self, string):
date = string.split(" ")
return datetime.strptime(f"{'-'.join(date[:3])} {':'.join(date[3:])}", "%Y-%m-%d %H:%M:%S")
# from the input path and the output path generate the new CSV file path (get only the filename from the input path)
def get_output_path(self, input_path, output_path):
output = output_path
if not output.endswith(os.path.sep):
output += os.path.sep
file_name = os.path.basename(input_path).split(".")[0]
return f"{output}/{file_name}.csv"
def main():
igrav = iGrav()
input_path = sys.argv[1]
output_path = sys.argv[2]
if os.path.exists(input_path) and os.path.exists(output_path):
file_list = igrav.get_all_tfs(input_path)
for path in file_list:
igrav.process(path, output_path)
else:
print("[ERROR]: Input or Output path doesn't exist!")
if __name__ == "__main__":
main()
|
lucamir/iGravToCSV
|
main.py
|
main.py
|
py
| 4,775 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27390800021
|
# Find All Approximate Occurrences of a Pattern in a String
# https://rosalind.info/problems/ba1h/
from utilities import get_file, get_answer_file, hamming_distance
def approximate_pattern(pattern, strand, distance):
len_s = len(strand)
len_p = len(pattern)
result = []
for i in range(len_s-len_p+1):
if hamming_distance(strand[i:i+len_p], pattern) <= distance:
result.append(i)
return result
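# worked example (illustrative): approximate_pattern("AAA", "AAATAA", 1) -> [0, 1, 2, 3],
# since every length-3 window of "AAATAA" is within Hamming distance 1 of "AAA"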
with get_file() as file:
pattern = file.readline().rstrip()
strand = file.readline().rstrip()
distance = int(file.readline().rstrip())
with get_answer_file() as file:
print(" ".join(map(str, approximate_pattern(pattern, strand, distance))), file=file)
|
Delta-Life/Bioinformatics
|
Rosalind/Bioinformatics Textbook Track/code/BA1H.py
|
BA1H.py
|
py
| 704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39255470036
|
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from proco.utils.tasks import update_cached_value
class SoftCacheManager(object):
CACHE_PREFIX = 'SOFT_CACHE'
    def get(self, key):
        value = cache.get('{0}_{1}'.format(self.CACHE_PREFIX, key), None)
        if value:
            # soft expiry: serve the (possibly stale) cached value right away,
            # but schedule a background refresh once it has expired or been
            # invalidated
            if (
                (value['expired_at'] and value['expired_at'] < timezone.now().timestamp())
                or value.get('invalidated', True)
            ) and value.get('request_path', None):
                update_cached_value.delay(url=value['request_path'])
            return value['value']
def _invalidate(self, key):
value = cache.get(key, None)
if value:
value['invalidated'] = True
cache.set(key, value, None)
def invalidate_many(self, keys):
for key in keys:
self.invalidate(key)
def invalidate(self, key='*'):
if isinstance(key, str):
keys = cache.keys('{0}_{1}'.format(self.CACHE_PREFIX, key))
for key in keys:
self._invalidate(key)
elif isinstance(key, (list, tuple)):
self.invalidate_many(key)
def set(self, key, value, request_path=None, soft_timeout=settings.CACHES['default']['TIMEOUT']):
cache.set('{0}_{1}'.format(self.CACHE_PREFIX, key), {
'value': value,
'invalidated': False,
'request_path': request_path,
'expired_at': (timezone.now().timestamp() + soft_timeout) if soft_timeout else None,
}, None)
cache_manager = SoftCacheManager()
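# Illustrative usage (key names and URL are hypothetical):
#   cache_manager.set('schools_list', data, request_path='/api/schools/',
#                     soft_timeout=3600)
#   cache_manager.get('schools_list')      # serves the value; schedules a
#                                          # background refresh once stale
#   cache_manager.invalidate('schools_*')  # flag matching keys for re-warming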
|
unicef/Project-Connect-BE
|
proco/utils/cache.py
|
cache.py
|
py
| 1,625 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74738442108
|
import os
import pandas as pd
import sys
from utility_funcs.vid_to_frame import convert_to_annotated_images_cvat
from utility_funcs.stratified_train_test_split import stratified_group_k_fold
irr_conf = 3
if sys.argv[1] == 'split':
trash_vids = [3, 39, 48]
df = pd.read_csv('data/frames.csv').dropna()
df = df.loc[~df['video'].isin(trash_vids)]
df['flow_class_str'] = df.FLOW_majority.astype(str)
df['final_class_comb'] = df[['flow_class_str', 'ETHNICITY', 'GENDER']].agg('_'.join, axis=1)
groups = df['video']
y = df['final_class_comb']
mapper = dict()
for i, class_type in enumerate(y.unique()):
mapper[class_type] = i
nomvar = y.replace(mapper)
for fold_ind, (dev_ind, val_ind) in enumerate(stratified_group_k_fold(nomvar, groups, k=3, seed=123)):
training = groups.iloc[dev_ind].unique()
validation = groups.iloc[val_ind].unique()
assert len(set(training) & set(validation)) == 0
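        # the assert guards the grouping property assumed of
        # stratified_group_k_fold: frames from one video never appear in both
        # the training and validation folds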
train_data = df.loc[df['video'].isin(training)]
test_data = df.loc[df['video'].isin(validation)]
print("Train", training)
print(train_data['final_class_comb'].value_counts(normalize=False).sum())
print(train_data['final_class_comb'].value_counts(normalize=True))
print('Validation', validation)
print(test_data['final_class_comb'].value_counts(normalize=False).sum())
print(test_data['final_class_comb'].value_counts(normalize=True))
print(df['final_class_comb'].value_counts(normalize=False).sum())
training = [str(x) + '.mp4' for x in training]
validation = [str(x) + '.mp4' for x in validation]
else:
training = ['1.mp4', '11.mp4', '13.mp4', '14.mp4', '15.mp4', '16.mp4', '17.mp4', '18.mp4', '22.mp4', '23.mp4',
'25.mp4', '27.mp4', '30.mp4', '31.mp4', '32.mp4', '34.mp4', '35.mp4', '38.mp4', '4.mp4', '43.mp4',
'44.mp4', '46.mp4', '47.mp4', '49.mp4', '5.mp4', '50.mp4', '51.mp4', '53.mp4', '54.mp4', '55.mp4',
'57.mp4', '58.mp4', '59.mp4', '60.mp4', '7.mp4']
validation = ['28.mp4', '10.mp4', '12.mp4', '19.mp4', '2.mp4', '20.mp4', '21.mp4', '24.mp4', '26.mp4', '36.mp4',
'37.mp4',
'40.mp4', '41.mp4', '42.mp4', '45.mp4', '52.mp4', '6.mp4', '9.mp4']
print('pre-defined split')
input('Check the proportions above and press enter to continue... \n' +
      'If the split is not sufficient, press CTRL + C to stop this process.')
clip_list = [clip for clip in os.listdir('data/videos') if clip != '.gitkeep']
train_clips = [x for x in clip_list if x in training]
print('Clips used for training: ', train_clips)
validation_clips = [x for x in clip_list if x in validation]
print('Clips used for testing:', validation_clips)
convert_to_annotated_images_cvat(train_clips, sense='training', irr_confidence=irr_conf, balance='flow',
fold='initial', n_parallel=5)
convert_to_annotated_images_cvat(validation_clips, sense='validation', irr_confidence=irr_conf, balance='flow',
fold='initial', n_parallel=5)
|
LHumpe/COINs-CNN-FLOW
|
src/io/data_generators/data_gen_HpSearch.py
|
data_gen_HpSearch.py
|
py
| 3,086 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6919943057
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf
class View(Gtk.Window):
# TODO: Make GUI prettier - low priority
# TODO: Change metric to *C and imperial to *F
def __init__(self):
super().__init__(title='Weather Forecast')
self._box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(self._box)
self._enter_city = Gtk.Entry()
self._enter_city.set_text('Enter city')
self._box.add(self._enter_city)
        self._search_button = Gtk.Button(label='Search')
self._box.add(self._search_button)
self._units_format_combo = Gtk.ComboBoxText()
self._units_format_combo.append('metric', 'metric')
self._units_format_combo.append('imperial', 'imperial')
self._box.add(self._units_format_combo)
self._weather_image = Gtk.Image()
self._box.add(self._weather_image)
self._city_label = Gtk.Label()
self._box.add(self._city_label)
self._temperature_label = Gtk.Label()
self._box.add(self._temperature_label)
self._conditions_label = Gtk.Label()
self._box.add(self._conditions_label)
self._description_label = Gtk.Label()
self._box.add(self._description_label)
self._up_to_date_label = Gtk.Label()
self._box.add(self._up_to_date_label)
self.connect('destroy', Gtk.main_quit)
@staticmethod
def run():
Gtk.main()
    def set_weather_icon(self, icon):
        weather_icon_path = f'./icons/{self._get_weather_image_icon(icon)}.svg'
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(weather_icon_path)
        self._weather_image.set_from_pixbuf(pixbuf=pixbuf)
def set_city(self, city):
self._city_label.set_label(city)
def set_temperature(self, temperature, units_format):
units_format_display = 'C' if units_format == 'metric' else 'F'
self._temperature_label.set_label(f'{temperature}\u00B0{units_format_display}')
def set_conditions(self, conditions):
self._conditions_label.set_label(conditions)
def set_description(self, description):
self._description_label.set_label(description)
def on_search(self, callback):
self._search_button.connect('clicked', lambda widget: callback(self._enter_city.get_text()
if self._enter_city.get_text() != 'Enter city' else ''))
def set_units_format(self, unit_format):
self._units_format_combo.set_active_id(unit_format)
def on_units_format_changed(self, callback):
self._units_format_combo.connect('changed', lambda widget: callback(self._units_format_combo.get_active_id()))
def set_up_to_date_message(self, is_weather_up_to_date=False):
color = 'green' if is_weather_up_to_date else 'red'
        up_to_date_message = 'Less than 2 hours ago' if is_weather_up_to_date else 'More than 2 hours ago'
self._up_to_date_label.set_markup(f'<span color="{color}">Last update:\n{up_to_date_message}</span>')
def show_dialog(self, status):
if status == 'Unauthorized':
dialog_title = 'Authorization problem'
dialog_text = 'Wrong API key'
elif status == 'ConnectionError':
dialog_title = 'Connection problem'
dialog_text = 'Check internet connection'
elif status == 'NotFound':
dialog_title = 'City not found'
dialog_text = 'Try another city'
else:
dialog_title = 'Unknown problem'
dialog_text = 'Problem not known'
dialog = Gtk.MessageDialog(
transient_for=self,
flags=0,
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.OK,
text=dialog_title
)
dialog.format_secondary_text(dialog_text)
dialog.run()
dialog.destroy()
    @staticmethod
    def _get_weather_image_icon(icon_from_api):
icons_mapping = {
'01d': 'weather-clear',
'01n': 'weather-clear-night',
'02d': 'weather-few-clouds',
'02n': 'weather-clouds-night',
'03d': 'weather-clouds',
'03n': 'weather-few-clouds-night',
'04d': 'weather-overcast',
'04n': 'weather-overcast',
'09d': 'weather-showers-scattered',
'09n': 'weather-showers-scattered',
'10d': 'weather-showers',
'10n': 'weather-showers',
'11d': 'weather-storm',
'11n': 'weather-storm',
'13d': 'weather-snow',
'13n': 'weather-snow',
'50d': 'weather-fog',
'50n': 'weather-fog',
'N/A': 'weather-none'
}
        return icons_mapping.get(icon_from_api, icons_mapping['N/A'])  # fall back to the generic icon for unmapped codes
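# Minimal wiring sketch (the callback is a stand-in for a real controller):
#   view = View()
#   view.on_search(lambda city: print('search requested for', city))
#   view.set_units_format('metric')
#   view.show_all()
#   View.run()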
|
lukasz130/WeatherForecast
|
sources/view.py
|
view.py
|
py
| 4,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43534065791
|
def convert_sample_to_shot_coQA(sample, with_knowledge=None):  # with_knowledge is currently unused
prefix = f"{sample['meta']}\n"
for turn in sample["dialogue"]:
prefix += f"Q: {turn[0]}" +"\n"
if turn[1] == "":
prefix += f"A:"
return prefix
else:
prefix += f"A: {turn[1]}" +"\n"
return prefix
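# Example (sample layout assumed from how the function indexes it):
#   sample = {"meta": "Once upon a time...",
#             "dialogue": [["Who is it about?", "a princess"],
#                          ["Where?", ""]]}
#   convert_sample_to_shot_coQA(sample)
# returns:
#   Once upon a time...
#   Q: Who is it about?
#   A: a princess
#   Q: Where?
#   A: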
|
andreamad8/FSB
|
prompts/coQA.py
|
coQA.py
|
py
| 334 |
python
|
en
|
code
| 119 |
github-code
|
6
|
24692323834
|
#Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
    Function for loading the disaster response messages and categories files
Arguments:
messages_filepath: File path to file containing disaster
response messages
categories_filepath: File path to file containing disaster
response classification
Returns:
df: A dataframe containing the merged datasets
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(left=messages,right=categories,how='left',on='id')
return df
def clean_data(df):
'''
Function for cleaning the disaster response message dataset
Arguments:
df: Pandas dataframe
Returns:
df: Pandas dataframe
'''
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';',expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0,:]
# use this row to extract a list of new column names for categories.
    category_colnames = row.apply(lambda value: value[:-2]).values
# rename the columns of `categories`
categories.columns = category_colnames
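    # at this point a raw cell like 'related-1;request-0' has been split into
    # columns named 'related', 'request', ... whose values are still strings
    # such as 'related-1'; the loop below keeps only the trailing digit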
for column in categories:
# set each value to be the last character of the string
        categories[column] = categories[column].apply(lambda value: value[-1])
# convert column from string to numeric
categories[column] = categories[column].astype(int)
df.drop('categories',axis=1,inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df,categories],axis=1)
# Remove erroneous values
df = df[df['related']!=2]
# drop duplicates
df_dedup = df.drop_duplicates(subset='id')
df = df_dedup.drop_duplicates(subset='message',keep=False)
return df
def save_data(df, database_filename):
'''
Function for saving a dataset to a SQLlite database
Arguments:
df: Pandas dataframe. Dataset that needs to be saved
database_filename: Location where database should be saved
'''
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql('Disaster_messages', engine, index=False,if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
|
Rmostert/Disaster-response-pipeline
|
data/process_data.py
|
process_data.py
|
py
| 3,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37340652783
|
from input import Input
from word import Word
from data import Data
from display import Display as dp
from colorama import Fore
import numpy as np
# The Game Object
class LeWord:
# Only gets the mode,
def __init__(self, mode):
self.mode = mode
# The joker of a word is the count of vowels and consonants in it.
def joker(self, true_word, mode=1):
vowels = "AEIOUaeiou"
# checks the mode
if mode == 1:
vowel_c = 0
consonant_c = 0
# for every letter if it is vowel vowel count plus one else consonants plus one
for letter in true_word:
if letter in vowels:
vowel_c += 1
continue
consonant_c += 1
# then displays the joker for the word
dp.display_joker(vowel_c, consonant_c, mode=mode)
else:
# if mode is not 1 then for every word does the same process
for index, word in enumerate(true_word):
vowel_c = 0
consonant_c = 0
# for every letter if it is vowel vowel count plus one else consonants plus one
for letter in word:
if letter in vowels:
vowel_c += 1
continue
consonant_c += 1
# then displays the joker for the word
dp.display_joker(vowel_c, consonant_c, mode=mode, que=index+1)
print()
# starts the game with selected mode
def play_game(self, again):
if self.mode[2] in ["1", "one"]:
self.single_word_game()
else:
self.multi_word_game()
# the game with one-single word
def single_word_game(self):
# Picks the random word
true_word = Word.pick_random_word(self.mode[1])
# Turn true word into array of individual letters and indexes using numpy library
true_arr = np.array(list(true_word.upper()))
        # Turn the array into [letter, index, found] triples
true_idx = [[item, idx, None] for idx, item in enumerate(true_arr)]
attempt = 0
guesses = []
# while the attempt count is less than the word length + 1
while attempt < self.mode[1] + 1:
# gets the guess from the user
guess_word = Input.ask_user_input(self.mode[1], attempt)
# looks for special inputs
if not guess_word:
break
elif guess_word == "JOKER":
self.joker(true_word, mode=self.mode[3])
continue
else:
# then appends the guess
guesses.append(guess_word)
# Turn guess into array of individual letters and indexes using numpy library
guess_arr = np.array(list(guess_word))
                # Turn the array into [letter, index, found] triples
guess_idx = [[item, idx, None] for idx, item in enumerate(guess_arr)]
matched = []
existing = []
# uses numpy to look if any matches between true and guess array pairs then appends to 'matching'
matching = np.where(true_arr == guess_arr)[0]
# for the matches between two array puts 'YES' to found
for item in matching:
matched.append(guess_idx[item][0])
guess_idx[item][2], true_idx[item][2] = 'YES', 'YES'
                # collect the letters not yet matched on either side
rem_guess = [item for item in guess_idx if item[2] != 'YES']
rem_true = [item for item in true_idx if item[2] != 'YES']
                # for each unmatched letter, check whether it exists elsewhere
                # in the true word; if it does, mark it with 'EX'
for guess in rem_guess:
for true in rem_true:
if guess[0] == true[0]:
if list(true_arr).count(guess[0]) > (matched.count(guess[0]) + existing.count(guess[0])):
existing.append(guess[0])
guess[2], true[2] = 'EX', 'EX'
else:
continue
# Colors and marks letters based on match, exist, or not exist
final = Word.mark_letters(guess_idx)
                # pad each marked letter with a space so the table columns align
                final = [letter + " " for letter in final]
# Turn current guess into table
guess_df = Word.build_df(attempt, final, attempt)
print(guess_df)
# checks whether all the letters are found or not.
if guess_word.lower() == true_word.lower():
                    # if so, display the congratulations message
dp.display_cong(1, true_word=true_word.upper())
# and asks the user want to play again
                    again = input(Fore.RED + "-->" + Fore.RESET).lower() == "y"
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses,
True if len(true_word) > attempt + 1 else False])
return again
else:
                    # otherwise increment the attempt count and continue
attempt += 1
# if attempt count is at limit then
if attempt == self.mode[1] + 1:
# gives the fail message
dp.display_fail(1, true_word)
# and asks the user want to play again
                        again = input(Fore.RED + "-->" + Fore.RESET).lower() == "y"
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses,
True if len(true_word) > attempt + 1 else False])
return again
def multi_word_game(self):
founded_words = []
# Picks the random words
true_word = Word.pick_random_word(self.mode[1], int(self.mode[3]))
# Turn true words into array of individual letters and indexes using numpy library
true_arr = np.array(list([j for j in i.lower()] for i in true_word))
        # Turn the arrays into [letter, index, found] triples
true_idx = [[[item, idx, None] for idx, item in enumerate(element)] for element in true_arr]
attempt = 0
guesses_word = []
# while the attempt count is less than the word length + 5
while attempt < self.mode[1] + 5:
# gets the guess from the user
guess_word = Input.ask_user_input(self.mode[1], attempt)
# looks for special inputs
if not guess_word:
break
elif guess_word == "JOKER":
self.joker(true_word, mode=self.mode[3])
continue
else:
# then appends the guess
guesses_word.append(guess_word)
# Turn guess into array of individual letters and indexes using numpy library
guess_arr = np.array(list(guess_word.lower()))
                # Turn the array into [letter, index, found] triples
guess_idx = [[[item, idx, None] for idx, item in enumerate(guess_arr)] for i in range(int(self.mode[3]))]
matched = []
existing = []
matches = []
# for every word in true words
for element in true_arr:
# uses numpy to look if any matches between true and guess array pairs then appends to 'matched'
matches.append(np.where(element == guess_arr)[0])
                # restore words found in earlier turns so they stay displayed
for founds in founded_words:
index, word = founds[0], founds[1]
guess_idx[index] = [[item, idx, 'YES'] for idx, item in enumerate(word)]
# for the matches between two array puts 'YES' to found
for index, match in enumerate(matches):
for item in match:
matched.append([index, guess_idx[index][item][0]])
guess_idx[index][item][2], true_idx[index][item][2] = 'YES', 'YES'
                # collect the guess letters not yet matched
rem_guess = []
for element in guess_idx:
word = []
for item in element:
if item[2] != 'YES':
word.append(item)
rem_guess.append(word)
                # collect the true-word letters not yet matched
rem_true = []
for element in true_idx:
word = []
for item in element:
if item[2] != 'YES':
word.append(item)
rem_true.append(word)
                # for each unmatched letter, check whether it exists elsewhere
                # in the corresponding true word; if it does, mark it with 'EX'
for index_g, guesses in enumerate(rem_guess):
for guess in guesses:
for index, word in enumerate(rem_true):
for true in word:
if index_g == index:
if guess[0] == true[0]:
if list(true_arr[index]).count(guess[0]) > \
(matched.count(guess[0]) + existing.count(guess[0])):
existing.append(guess[0])
guess[2], true[2] = 'EX', 'EX'
else:
continue
# Colors and marks letters based on match, exist, or not exist
final = Word.mark_letters(guess_idx, mode=self.mode[3])
                # pad each marked letter with a space so the table columns align
                final = [letter + " " for letter in final]
# Turn current guess into table
guess_df = Word.build_df(attempt, final, attempt, mode=int(self.mode[3]))
print(guess_df)
                # if any word was found, record it in founded_words
for index, word in enumerate(true_word):
if guess_word.lower() == word.lower():
founded_words.append([index, word])
# checks whether all the letters are found or not.
if len(true_word) == len(founded_words):
                    # if so, display the congratulations message
dp.display_cong(len(true_word), true_word=true_word)
# and asks the user want to play again
                    again = input(Fore.RED + "-->" + Fore.RESET).lower() == "y"
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses_word,
True if len(true_word) > attempt + 1 else False])
return again
else:
                    # otherwise increment the attempt count and continue
attempt += 1
# if attempt count is at limit then
if attempt == self.mode[1] + 5:
# gives the fail message
dp.display_fail(len(true_word), true_word)
# and asks the user want to play again
                        again = input(Fore.RED + "-->" + Fore.RESET).lower() == "y"
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses_word,
True if len(true_word) > attempt + 1 else False])
return again
# Starts the LE-WORD
@staticmethod
def start_le_word():
opening = True
again = False
# Displays the menu for user to choose
mode = dp.display_menu()
# Until the user types mode 'quit' the game continues
while mode[0] != "quit":
            # if this is neither the first run nor a replay, show the menu again
if not opening and not again:
mode = dp.display_menu()
if mode[0] == "quit":
break
# if choice statics displays statics
if mode[0] == "statics":
dp.display_statics()
# if choice rules displays rules
elif mode[0] == "rules":
dp.display_rules()
opening = False
# Creates a LeWord object
game = LeWord(mode)
# if the mode is play
if mode[0] == "play":
# then looks at the game mode
if mode[2] in ["one", "1"]:
# if single then single_word_game
again = game.single_word_game()
elif mode[2] == "multi":
# if multi then multi_word_game
again = game.multi_word_game()
# after game ends if user wants to play again
if again:
# asks user to play in which mode
a, b, c = Input.ask_mode()
if not mode:
opening = False
again = False
# assigns the mode and continues
mode = ["play", a, b, c]
|
mburaozkan/LeWord-The-Word-Game
|
game.py
|
game.py
|
py
| 14,751 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39627693477
|
# __author__ = "Chang Gao"
# __copyright__ = "Copyright 2018 to the author"
# __license__ = "Private"
# __version__ = "0.1.0"
# __maintainer__ = "Chang Gao"
# __email__ = "[email protected]"
# __status__ = "Prototype"
import sys
import os
import torch as t
import torch.nn.functional as F
from torch.autograd.function import Function
import time
import math
import numpy as np
def save_normalization(save_path, tr_mean, tr_std, lab_mean, lab_std):
fn_base = os.path.splitext(save_path)[0]
print("\nSaving normalization parameters to " + str(fn_base)+'-XX.pt')
norm = {
'tr_mean': tr_mean,
'tr_std': tr_std,
'lab_mean': lab_mean,
'lab_std': lab_std,
}
t.save(norm, str(fn_base+'-norm.pt'))
def load_normalization(save_path):
fn_base = os.path.splitext(save_path)[0]
print("\nLoading normalization parameters from ", str(fn_base))
norm = t.load(fn_base+'-norm.pt')
return norm['tr_mean'], norm['tr_std'], norm['lab_mean'], norm['lab_std']
# print command line (maybe to use in a script)
def print_commandline(parser):
args = parser.parse_args()
print('Command line:')
print('python '+os.path.basename(sys.argv[0]), end=' ')
for arg in vars(args):
print('--' + str(arg) + ' "' + str(getattr(args, arg))+'"', end=' ')
print()
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def to_sparse(x):
""" converts dense tensor x to sparse format """
x_typename = t.typename(x).split('.')[-1]
sparse_tensortype = getattr(t.sparse, x_typename)
indices = t.nonzero(x)
    if indices.nelement() == 0:  # all elements are zero
        return sparse_tensortype(*x.shape)
    else:
        indices = indices.t()
        values = x[tuple(indices[i] for i in range(indices.shape[0]))]
        return sparse_tensortype(indices, values, x.size())
def quantizeTensor(x, m, n, en):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
if en == 0:
return x
power = 2. ** n
clip_val = 2. ** (m + n - 1)
value = t.round(x * power)
# value = GradPreserveRoundOp.apply(x * power) # rounding
value = t.clamp(value, -clip_val, clip_val - 1) # saturation arithmetic
value = value / power
return value
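# Worked example: with m=1, n=7 (Q1.7 fixed point), power=128 and values clip
# to [-128, 127] before rescaling, so
#   quantizeTensor(t.tensor([0.123]), 1, 7, 1) -> tensor([0.1250])
# because round(0.123 * 128) = 16 and 16 / 128 = 0.125.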
def quantize_rnn(net, qi, qf, en):
for name, param in net.named_parameters():
if 'rnn' in name:
param.data = quantizeTensor(param.data, qi, qf, en)
return net
def pruneTensor(x, alpha):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
n_neuron = x.size(0)
n_input = x.size(1)
prune_prob_mask = t.exp(-alpha * t.unsqueeze(t.arange(0, n_neuron), dim=1).repeat(1, n_input).float()).cuda()
prune_rand_mask = t.rand(n_neuron, n_input).cuda()
prune_mask = prune_rand_mask.masked_fill_(prune_rand_mask > prune_prob_mask, 1)
prune_mask = prune_mask.masked_fill_(prune_rand_mask <= prune_prob_mask, 0)
_, indices = t.sort(t.abs(x), 0)
# print("indices shape", indices.size())
# print("prune_mask shape", prune_mask.size())
# print("x shape", x.size())
for j in range(0, n_input):
x[indices[:, j], j] *= prune_mask[:, j]
return x
def targetedDropout(x, gamma, alpha, epoch):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
t.manual_seed(epoch)
t.cuda.manual_seed_all(epoch)
n_elements = x.numel()
drop_part = round(n_elements * gamma)
weight_vec = x.view(-1)
weight_vec_abs = t.abs(weight_vec)
sorted, indices = t.sort(weight_vec_abs)
# print(sorted)
drop_indices = indices[0:drop_part]
drop_rand_mask = t.rand(drop_indices.size(0)).cuda()
drop_mask = t.ones(drop_indices.size(0)).cuda()
drop_mask = drop_mask.masked_fill_(drop_rand_mask <= alpha, 0)
weight_vec[drop_indices] *= drop_mask
weight = t.reshape(weight_vec, (x.size(0), x.size(1)))
return weight
def alignedTargetedDropout(x, gamma, alpha, num_pe, epoch):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
n_rows = x.shape[0]
n_cols = x.shape[1]
# Split and shuffle weight matrix
for i in range(0, num_pe):
for j in range(0, n_cols):
targetedDropout(x[np.arange(i, n_rows, num_pe), j], gamma, alpha, epoch)
return x
class GradPreserveRoundOp(Function):
# Note that both forward and backward are @staticmethods
@staticmethod
def forward(ctx, input):
output = t.round(input)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
grad_input = grad_output
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
# print(grad_output.size())
# if not t.equal(grad_output, QuantizeT(grad_output, dW_qp)): print("grad_output not quantized")
if ctx.needs_input_grad[0]:
grad_input = grad_output
# Return same number of parameters as "def forward(...)"
return grad_input
class GradPreserveThreshold(Function):
# Note that both forward and backward are @staticmethods
@staticmethod
def forward(ctx, input, threshold, value):
output = F.threshold(input, threshold, value)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
grad_input = grad_output
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
# print(grad_output.size())
# if not t.equal(grad_output, QuantizeT(grad_output, dW_qp)): print("grad_output not quantized")
if ctx.needs_input_grad[0]:
grad_input = grad_output
# Return same number of parameters as "def forward(...)"
return grad_input
def look_ahead_seq(seq_in, t_width=16, padding=0, batch_first=0):
# Convert input sequence to batch first shape (seq_len, n_batch, n_feature)
seq = seq_in
if batch_first:
seq = seq_in.transpose(0, 1)
seq_len = seq.size(0)
n_batch = seq.size(1)
n_feature = seq.size(2)
# int(t.ceil(float(seq_len)/float(t_width)))
new_seq = []
for i in range(0, seq_len):
if i < seq_len - t_width:
seq_block = seq[i:i + t_width, :, :]
else:
seq_block = seq[i:, :, :]
seq_block_pad = t.zeros([t_width - (seq_len - i), n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
new_seq.append(seq_block)
new_seq = t.stack(new_seq, 0)
new_seq = new_seq.transpose(1, 2)
new_seq = new_seq.transpose(0, 1)
new_seq = new_seq.transpose(2, 3)
return new_seq
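# Shape note: for an input of shape (seq_len, n_batch, n_feature) the stacked
# blocks come out as (seq_len, t_width, n_batch, n_feature) and the transposes
# above rearrange them to (n_batch, seq_len, n_feature, t_width).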
def look_around_seq(seq_in, t_width=16, padding=0, batch_first=0):
# Convert input sequence to batch first shape (seq_len, n_batch, n_feature)
seq = seq_in
if batch_first:
seq = seq_in.transpose(0, 1)
seq_len = seq.size(0)
n_batch = seq.size(1)
n_feature = seq.size(2)
# int(t.ceil(float(seq_len)/float(t_width)))
new_seq = []
for i in range(0, seq_len):
if i >= seq_len - t_width:
seq_block = seq[i - t_width:, :, :]
seq_block_pad = t.zeros([t_width - (seq_len - i) + 1, n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
elif i < t_width:
seq_block = seq[0:i + 1 + t_width, :, :]
seq_block_pad = t.zeros([t_width - i, n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
else:
seq_block = seq[i - t_width:i + 1 + t_width, :, :]
# print(seq_block.size())
new_seq.append(seq_block)
new_seq = t.stack(new_seq, 0)
new_seq = new_seq.transpose(1, 2)
new_seq = new_seq.transpose(0, 1)
new_seq = new_seq.transpose(2, 3)
return new_seq
def get_temporal_sparsity(list_layer, seq_len, threshold):
# Evaluate Sparsity
num_zeros = 0
num_elems = 0
# print(seq_len.size())
# Iterate through layers
for layer in list_layer:
all_delta_vec = layer.transpose(0, 1)
all_delta_vec = t.abs(all_delta_vec) # Take absolute values of all delta vector elements
for i, delta_vec in enumerate(all_delta_vec):
seq = delta_vec[:seq_len[i], :]
zero_mask = seq < threshold
num_zeros += t.sum(zero_mask)
num_elems += t.numel(zero_mask)
sparsity = float(num_zeros) / float(num_elems)
return sparsity
|
SensorsINI/DeltaGRU-cartpole
|
modules/util.py
|
util.py
|
py
| 10,645 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7926234515
|
import pandas as pd
import yfinance as yf
# Read the symbols from a CSV file
symbols_df = pd.read_csv("symbols.csv")
symbols = symbols_df["Symbol"].tolist()
# Specify the years
years = [2021, 2022]
# Create an empty list to store the dataframes for each stock
dfs = []
# Iterate over the symbols
for symbol in symbols:
try:
# Get the dividends data for the specified years
stock = yf.Ticker(symbol)
dividends = stock.dividends
for year in years:
dividends_year = dividends.loc[str(year)]
if dividends_year.empty:
print(f"No dividend data available for {symbol} in {year}.")
continue
# Get the market capitalization data
try:
market_cap = float(stock.info["marketCap"])
except KeyError:
market_cap = None
# Get the stock price data for the specified year
stock_price = stock.history(start=f"{year}-01-01", end=f"{year}-12-31")
# Calculate the price at the beginning and end of the year
price_at_beginning = stock_price.iloc[0]['Close']
price_at_end = stock_price.iloc[-1]['Close']
# Create a dataframe for the current stock and year
data = {
"Symbol": symbol,
"Year": year,
"Dividend Date": dividends_year.index.strftime('%m/%d/%Y'),
"Market Capitalization": market_cap,
"Count of total dividends paid for that year": len(dividends_year),
"How much was paid": dividends_year.tolist(),
"Price at Beginning of Year": price_at_beginning,
"Price at End of Year": price_at_end
}
df = pd.DataFrame(data)
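            # pd.DataFrame broadcasts the scalar fields (Symbol, Year, market
            # cap, prices) across one row per 'Dividend Date' entry, so a
            # stock paying quarterly produces four rows for that year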
# Append the dataframe to the list
dfs.append(df)
except Exception as e:
print(f"Got error from Yahoo API for ticker {symbol}, Error: {str(e)}")
print(f"Skipping symbol {symbol} due to data unavailability.")
# Concatenate the dataframes for all stocks and years
result_df = pd.concat(dfs)
# Convert columns to float type
result_df["Market Capitalization"] = result_df["Market Capitalization"].astype(float)
result_df["Count of total dividends paid for that year"] = result_df["Count of total dividends paid for that year"].astype(float)
# Save the dataframe to a CSV file
result_df.to_csv("temp_dividend_data_2022_2023.csv", index=False)
|
kmlspktaa/data-analytics
|
economics/dividends-trading/development/dividend-stocks.py
|
dividend-stocks.py
|
py
| 2,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14374871985
|
# coding=utf-8
"""Unit tests for activitypub.py."""
from base64 import b64encode
import copy
from datetime import datetime, timedelta
from hashlib import sha256
import logging
from unittest import skip
from unittest.mock import patch
from flask import g
from google.cloud import ndb
from granary import as2, microformats2
from httpsig import HeaderSigner
from oauth_dropins.webutil.testutil import requests_response
from oauth_dropins.webutil.util import json_dumps, json_loads
import requests
from urllib3.exceptions import ReadTimeoutError
from werkzeug.exceptions import BadGateway
# import first so that Fake is defined before URL routes are registered
from .testutil import Fake, TestCase
import activitypub
from activitypub import ActivityPub, postprocess_as2
import common
from models import Follower, Object
import protocol
from web import Web
# have to import module, not attrs, to avoid circular import
from . import test_web
ACTOR = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/swentel',
'type': 'Person',
'inbox': 'http://mas.to/inbox',
'name': 'Mrs. ☕ Foo',
'icon': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
'image': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
}
ACTOR_BASE = {
'@context': [
'https://www.w3.org/ns/activitystreams',
'https://w3id.org/security/v1',
],
'type': 'Person',
'id': 'http://localhost/user.com',
'url': 'http://localhost/r/https://user.com/',
'preferredUsername': 'user.com',
'summary': '',
'inbox': 'http://localhost/user.com/inbox',
'outbox': 'http://localhost/user.com/outbox',
'following': 'http://localhost/user.com/following',
'followers': 'http://localhost/user.com/followers',
'endpoints': {
'sharedInbox': 'http://localhost/ap/sharedInbox',
},
'publicKey': {
'id': 'http://localhost/user.com#key',
'owner': 'http://localhost/user.com',
'publicKeyPem': 'populated in setUp()',
},
}
ACTOR_BASE_FULL = {
**ACTOR_BASE,
'name': 'Ms. ☕ Baz',
'attachment': [{
'name': 'Web site',
'type': 'PropertyValue',
'value': '<a rel="me" href="https://user.com/"><span class="invisible">https://</span>user.com<span class="invisible">/</span></a>',
}],
}
REPLY_OBJECT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Note',
'content': 'A ☕ reply',
'id': 'http://mas.to/reply/id',
'url': 'http://mas.to/reply',
'inReplyTo': 'https://user.com/post',
'to': [as2.PUBLIC_AUDIENCE],
}
REPLY_OBJECT_WRAPPED = copy.deepcopy(REPLY_OBJECT)
REPLY_OBJECT_WRAPPED['inReplyTo'] = 'http://localhost/r/https://user.com/post'
REPLY = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/reply/as2',
'object': REPLY_OBJECT,
}
NOTE_OBJECT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Note',
'content': '☕ just a normal post',
'id': 'http://mas.to/note/id',
'url': 'http://mas.to/note',
'to': [as2.PUBLIC_AUDIENCE],
'cc': [
'https://mas.to/author/followers',
'https://masto.foo/@other',
'http://localhost/target', # redirect-wrapped
],
}
NOTE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/note/as2',
'actor': 'https://masto.foo/@author',
'object': NOTE_OBJECT,
}
MENTION_OBJECT = copy.deepcopy(NOTE_OBJECT)
MENTION_OBJECT.update({
'id': 'http://mas.to/mention/id',
'url': 'http://mas.to/mention',
'tag': [{
'type': 'Mention',
'href': 'https://masto.foo/@other',
'name': '@[email protected]',
}, {
'type': 'Mention',
'href': 'http://localhost/tar.get', # redirect-wrapped
'name': '@[email protected]',
}],
})
MENTION = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Create',
'id': 'http://mas.to/mention/as2',
'object': MENTION_OBJECT,
}
# based on example Mastodon like:
# https://github.com/snarfed/bridgy-fed/issues/4#issuecomment-334212362
# (reposts are very similar)
LIKE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://mas.to/like#ok',
'type': 'Like',
'object': 'https://user.com/post',
'actor': 'https://mas.to/actor',
}
LIKE_WRAPPED = copy.deepcopy(LIKE)
LIKE_WRAPPED['object'] = 'http://localhost/r/https://user.com/post'
LIKE_ACTOR = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/actor',
'type': 'Person',
'name': 'Ms. Actor',
'preferredUsername': 'msactor',
'icon': {'type': 'Image', 'url': 'https://user.com/pic.jpg'},
'image': [
{'type': 'Image', 'url': 'https://user.com/thumb.jpg'},
{'type': 'Image', 'url': 'https://user.com/pic.jpg'},
],
}
LIKE_WITH_ACTOR = {
**LIKE,
'actor': LIKE_ACTOR,
}
# repost, should be delivered to followers if object is a fediverse post,
# translated to webmention if object is an indieweb post
REPOST = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/alice/statuses/654/activity',
'type': 'Announce',
'actor': ACTOR['id'],
'object': NOTE_OBJECT['id'],
'published': '2023-02-08T17:44:16Z',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}
REPOST_FULL = {
**REPOST,
'actor': ACTOR,
'object': NOTE_OBJECT,
}
FOLLOW = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/6d1a',
'type': 'Follow',
'actor': ACTOR['id'],
'object': 'https://user.com/',
}
FOLLOW_WRAPPED = copy.deepcopy(FOLLOW)
FOLLOW_WRAPPED['object'] = 'http://localhost/user.com'
FOLLOW_WITH_ACTOR = copy.deepcopy(FOLLOW)
FOLLOW_WITH_ACTOR['actor'] = ACTOR
FOLLOW_WRAPPED_WITH_ACTOR = copy.deepcopy(FOLLOW_WRAPPED)
FOLLOW_WRAPPED_WITH_ACTOR['actor'] = ACTOR
FOLLOW_WITH_OBJECT = copy.deepcopy(FOLLOW)
FOLLOW_WITH_OBJECT['object'] = ACTOR
ACCEPT_FOLLOW = copy.deepcopy(FOLLOW_WITH_ACTOR)
del ACCEPT_FOLLOW['@context']
del ACCEPT_FOLLOW['actor']['@context']
ACCEPT_FOLLOW['actor']['image'] = {'type': 'Image', 'url': 'https://user.com/me.jpg'}
ACCEPT_FOLLOW['object'] = 'http://localhost/user.com'
ACCEPT = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Accept',
'id': 'http://localhost/web/user.com/followers#accept-https://mas.to/6d1a',
'actor': 'http://localhost/user.com',
'object': {
**ACCEPT_FOLLOW,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
},
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}
UNDO_FOLLOW_WRAPPED = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/6d1b',
'type': 'Undo',
'actor': 'https://mas.to/users/swentel',
'object': FOLLOW_WRAPPED,
}
DELETE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://mas.to/users/swentel#delete',
'type': 'Delete',
'actor': 'https://mas.to/users/swentel',
'object': 'https://mas.to/users/swentel',
}
UPDATE_PERSON = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://a/person#update',
'type': 'Update',
'actor': 'https://mas.to/users/swentel',
'object': {
'type': 'Person',
'id': 'https://a/person',
},
}
UPDATE_NOTE = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'https://a/note#update',
'type': 'Update',
'actor': 'https://mas.to/users/swentel',
'object': {
'type': 'Note',
'id': 'https://a/note',
},
}
WEBMENTION_DISCOVERY = requests_response(
'<html><head><link rel="webmention" href="/webmention"></html>')
HTML = requests_response('<html></html>', headers={
'Content-Type': common.CONTENT_TYPE_HTML,
})
HTML_WITH_AS2 = requests_response("""\
<html><meta>
<link href='http://as2' rel='alternate' type='application/activity+json'>
</meta></html>
""", headers={
'Content-Type': common.CONTENT_TYPE_HTML,
})
AS2_OBJ = {'foo': ['bar']}
AS2 = requests_response(AS2_OBJ, headers={
'Content-Type': as2.CONTENT_TYPE,
})
NOT_ACCEPTABLE = requests_response(status=406)
@patch('requests.post')
@patch('requests.get')
@patch('requests.head')
class ActivityPubTest(TestCase):
def setUp(self):
super().setUp()
self.request_context.push()
self.user = self.make_user('user.com', has_hcard=True, has_redirects=True,
obj_as2={**ACTOR, 'id': 'https://user.com/'})
self.swentel_key = ndb.Key(ActivityPub, 'https://mas.to/users/swentel')
self.masto_actor_key = ndb.Key(ActivityPub, 'https://mas.to/actor')
ACTOR_BASE['publicKey']['publicKeyPem'] = self.user.public_pem().decode()
self.key_id_obj = Object(id='http://my/key/id', as2={
**ACTOR,
'publicKey': {
'id': 'http://my/key/id#unused',
'owner': 'http://own/er',
'publicKeyPem': self.user.public_pem().decode(),
},
})
self.key_id_obj.put()
def assert_object(self, id, **props):
props.setdefault('delivered_protocol', 'web')
return super().assert_object(id, **props)
def sign(self, path, body):
"""Constructs HTTP Signature, returns headers."""
digest = b64encode(sha256(body.encode()).digest()).decode()
headers = {
'Date': 'Sun, 02 Jan 2022 03:04:05 GMT',
'Host': 'localhost',
'Content-Type': as2.CONTENT_TYPE,
'Digest': f'SHA-256={digest}',
}
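        # HeaderSigner adds a 'signature' header roughly of the form
        #   keyId="http://my/key/id#unused",algorithm="rsa-sha256",
        #   headers="date host digest (request-target)",signature="..."
        # which the inbox handlers verify against the stored actor public key.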
hs = HeaderSigner('http://my/key/id#unused', self.user.private_pem().decode(),
algorithm='rsa-sha256', sign_header='signature',
headers=('Date', 'Host', 'Digest', '(request-target)'))
return hs.sign(headers, method='POST', path=path)
def post(self, path, json=None):
"""Wrapper around self.client.post that adds signature."""
body = json_dumps(json)
return self.client.post(path, data=body, headers=self.sign(path, body))
def test_actor_fake(self, *_):
self.make_user('user.com', cls=Fake, obj_as2={
'type': 'Person',
'id': 'https://user.com/',
})
got = self.client.get('/ap/fake/user.com')
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
type = got.headers['Content-Type']
self.assertTrue(type.startswith(as2.CONTENT_TYPE), type)
self.assertEqual({
'@context': ['https://w3id.org/security/v1'],
'type': 'Person',
'id': 'http://bf/fake/user.com/ap',
'preferredUsername': 'user.com',
'url': 'http://localhost/r/user.com',
'summary': '',
'inbox': 'http://bf/fake/user.com/ap/inbox',
'outbox': 'http://bf/fake/user.com/ap/outbox',
'following': 'http://bf/fake/user.com/ap/following',
'followers': 'http://bf/fake/user.com/ap/followers',
'endpoints': {'sharedInbox': 'http://localhost/ap/sharedInbox'},
'publicKey': {
'id': 'http://localhost/user.com#key',
'owner': 'http://localhost/user.com',
'publicKeyPem': self.user.public_pem().decode(),
},
}, got.json)
def test_actor_web(self, *_):
"""Web users are special cased to drop the /web/ prefix."""
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
type = got.headers['Content-Type']
self.assertTrue(type.startswith(as2.CONTENT_TYPE), type)
self.assertEqual({
**ACTOR_BASE,
'name': 'Mrs. ☕ Foo',
'icon': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
'image': {'type': 'Image', 'url': 'https://user.com/me.jpg'},
}, got.json)
def test_actor_blocked_tld(self, _, __, ___):
got = self.client.get('/foo.json')
self.assertEqual(404, got.status_code)
def test_actor_new_user_fetch(self, _, mock_get, __):
self.user.obj_key.delete()
self.user.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response(test_web.ACTOR_HTML)
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
self.assert_equals(ACTOR_BASE_FULL, got.json, ignore=['publicKeyPem'])
def test_actor_new_user_fetch_no_mf2(self, _, mock_get, __):
self.user.obj_key.delete()
self.user.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response('<html></html>')
got = self.client.get('/user.com')
self.assertEqual(200, got.status_code)
self.assert_equals(ACTOR_BASE, got.json, ignore=['publicKeyPem'])
def test_actor_new_user_fetch_fails(self, _, mock_get, __):
mock_get.side_effect = ReadTimeoutError(None, None, None)
got = self.client.get('/nope.com')
self.assertEqual(504, got.status_code)
def test_individual_inbox_no_user(self, mock_head, mock_get, mock_post):
self.user.key.delete()
mock_get.side_effect = [self.as2_resp(LIKE_ACTOR)]
reply = {
**REPLY,
'actor': LIKE_ACTOR,
}
self._test_inbox_reply(reply, mock_head, mock_get, mock_post)
self.assert_user(ActivityPub, 'https://mas.to/actor',
obj_as2=LIKE_ACTOR, direct=True)
def test_inbox_activity_without_id(self, *_):
note = copy.deepcopy(NOTE)
del note['id']
resp = self.post('/ap/sharedInbox', json=note)
self.assertEqual(400, resp.status_code)
def test_inbox_reply_object(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY_OBJECT, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1(REPLY_OBJECT),
type='comment')
# auto-generated post activity
self.assert_object(
'http://mas.to/reply/id#bridgy-fed-create',
source_protocol='activitypub',
our_as1={
**as2.to_as1(REPLY),
'id': 'http://mas.to/reply/id#bridgy-fed-create',
'published': '2022-01-02T03:04:05+00:00',
},
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def test_inbox_reply_object_wrapped(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY_OBJECT_WRAPPED, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1(REPLY_OBJECT),
type='comment')
# auto-generated post activity
self.assert_object(
'http://mas.to/reply/id#bridgy-fed-create',
source_protocol='activitypub',
our_as1={
**as2.to_as1(REPLY),
'id': 'http://mas.to/reply/id#bridgy-fed-create',
'published': '2022-01-02T03:04:05+00:00',
},
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def test_inbox_reply_create_activity(self, mock_head, mock_get, mock_post):
self._test_inbox_reply(REPLY, mock_head, mock_get, mock_post)
self.assert_object('http://mas.to/reply/id',
source_protocol='activitypub',
our_as1=as2.to_as1({
**REPLY_OBJECT,
'author': None,
}),
type='comment')
# sent activity
self.assert_object(
'http://mas.to/reply/as2',
source_protocol='activitypub',
as2=REPLY,
status='complete',
delivered=['https://user.com/post'],
type='post',
notify=[self.user.key],
)
def _test_inbox_reply(self, reply, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/post')
mock_get.side_effect = (
(list(mock_get.side_effect) if mock_get.side_effect else [])
+ [
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
WEBMENTION_DISCOVERY,
])
mock_post.return_value = requests_response()
got = self.post('/ap/web/user.com/inbox', json=reply)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
self.assert_req(mock_get, 'https://user.com/post')
convert_id = reply['id'].replace('://', ':/')
if reply['type'] != 'Create':
convert_id += '%23bridgy-fed-create'
self.assert_req(
mock_post,
'https://user.com/webmention',
headers={'Accept': '*/*'},
allow_redirects=False,
data={
'source': f'https://ap.brid.gy/convert/web/{convert_id}',
'target': 'https://user.com/post',
},
)
def test_inbox_reply_to_self_domain(self, *mocks):
self._test_inbox_ignore_reply_to('http://localhost/mas.to', *mocks)
def test_inbox_reply_to_in_blocklist(self, *mocks):
self._test_inbox_ignore_reply_to('https://twitter.com/foo', *mocks)
def _test_inbox_ignore_reply_to(self, reply_to, mock_head, mock_get, mock_post):
reply = copy.deepcopy(REPLY_OBJECT)
reply['inReplyTo'] = reply_to
mock_head.return_value = requests_response(url='http://mas.to/')
mock_get.side_effect = [
# actor fetch
self.as2_resp(ACTOR),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
]
got = self.post('/user.com/inbox', json=reply)
self.assertEqual(204, got.status_code, got.get_data(as_text=True))
mock_post.assert_not_called()
def test_individual_inbox_create_obj(self, *mocks):
self._test_inbox_create_obj('/user.com/inbox', *mocks)
def test_shared_inbox_create_obj(self, *mocks):
self._test_inbox_create_obj('/inbox', *mocks)
def _test_inbox_create_obj(self, path, mock_head, mock_get, mock_post):
swentel = self.make_user('https://mas.to/users/swentel', cls=ActivityPub)
Follower.get_or_create(to=swentel, from_=self.user)
bar = self.make_user('fake:bar', cls=Fake, obj_id='fake:bar')
Follower.get_or_create(to=self.make_user('https://other.actor',
cls=ActivityPub),
from_=bar)
baz = self.make_user('fake:baz', cls=Fake, obj_id='fake:baz')
Follower.get_or_create(to=swentel, from_=baz)
baj = self.make_user('fake:baj', cls=Fake, obj_id='fake:baj')
Follower.get_or_create(to=swentel, from_=baj, status='inactive')
mock_head.return_value = requests_response(url='http://target')
mock_get.return_value = self.as2_resp(ACTOR) # source actor
mock_post.return_value = requests_response()
got = self.post(path, json=NOTE)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
expected_obj = {
**as2.to_as1(NOTE_OBJECT),
'author': {'id': 'https://masto.foo/@author'},
}
self.assert_object(NOTE_OBJECT['id'],
source_protocol='activitypub',
our_as1=expected_obj,
type='note',
feed=[self.user.key, baz.key])
expected_create = as2.to_as1(common.redirect_unwrap(NOTE))
expected_create.update({
'actor': as2.to_as1(ACTOR),
'object': expected_obj,
})
self.assert_object('http://mas.to/note/as2',
source_protocol='activitypub',
our_as1=expected_create,
users=[ndb.Key(ActivityPub, 'https://masto.foo/@author')],
type='post',
object_ids=[NOTE_OBJECT['id']],
status='complete',
delivered=['shared:target'],
delivered_protocol='fake')
def test_repost_of_indieweb(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/orig')
mock_get.return_value = WEBMENTION_DISCOVERY
mock_post.return_value = requests_response() # webmention
orig_url = 'https://user.com/orig'
note = {
**NOTE_OBJECT,
'id': 'https://user.com/orig',
}
del note['url']
Object(id=orig_url, mf2=microformats2.object_to_json(as2.to_as1(note)),
source_protocol='web').put()
repost = copy.deepcopy(REPOST_FULL)
repost['object'] = f'http://localhost/r/{orig_url}'
got = self.post('/user.com/inbox', json=repost)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
convert_id = REPOST['id'].replace('://', ':/')
self.assert_req(
mock_post,
'https://user.com/webmention',
headers={'Accept': '*/*'},
allow_redirects=False,
data={
'source': f'https://ap.brid.gy/convert/web/{convert_id}',
'target': orig_url,
},
)
self.assert_object(REPOST_FULL['id'],
source_protocol='activitypub',
status='complete',
as2={
**REPOST,
'actor': ACTOR,
'object': orig_url,
},
users=[self.swentel_key],
delivered=['https://user.com/orig'],
type='share',
object_ids=['https://user.com/orig'])
def test_shared_inbox_repost_of_fediverse(self, mock_head, mock_get, mock_post):
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=self.user)
baz = self.make_user('fake:baz', cls=Fake, obj_id='fake:baz')
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']), from_=baz)
baj = self.make_user('fake:baj', cls=Fake, obj_id='fake:baj')
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=baj, status='inactive')
mock_head.return_value = requests_response(url='http://target')
mock_get.side_effect = [
self.as2_resp(ACTOR), # source actor
self.as2_resp(NOTE_OBJECT), # object of repost
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
HTML, # no webmention endpoint
]
got = self.post('/ap/sharedInbox', json=REPOST)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
mock_post.assert_not_called() # no webmention
self.assert_object(REPOST['id'],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1({**REPOST, 'actor': ACTOR}),
users=[self.swentel_key],
feed=[self.user.key, baz.key],
delivered=['shared:target'],
delivered_protocol='fake',
type='share',
object_ids=[REPOST['object']])
def test_inbox_no_user(self, mock_head, mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
HTML,
]
got = self.post('/ap/sharedInbox', json={
**LIKE,
'object': 'http://nope.com/post',
})
self.assertEqual(204, got.status_code)
self.assert_object('http://mas.to/like#ok',
# no nope.com Web user key since it didn't exist
source_protocol='activitypub',
status='ignored',
our_as1=as2.to_as1({
**LIKE_WITH_ACTOR,
'object': 'http://nope.com/post',
}),
type='like',
notify=[self.user.key],
users=[self.masto_actor_key],
object_ids=['http://nope.com/post'])
def test_inbox_not_public(self, mock_head, mock_get, mock_post):
Follower.get_or_create(to=ActivityPub.get_or_create(ACTOR['id']),
from_=self.user)
mock_head.return_value = requests_response(url='http://target')
mock_get.return_value = self.as2_resp(ACTOR) # source actor
not_public = copy.deepcopy(NOTE)
del not_public['object']['to']
got = self.post('/user.com/inbox', json=not_public)
self.assertEqual(200, got.status_code, got.get_data(as_text=True))
self.assertIsNone(Object.get_by_id(not_public['id']))
self.assertIsNone(Object.get_by_id(not_public['object']['id']))
def test_inbox_like(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/post')
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(200, got.status_code)
self.assertIn(self.as2_req('https://mas.to/actor'), mock_get.mock_calls)
self.assertIn(self.req('https://user.com/post'), mock_get.mock_calls)
args, kwargs = mock_post.call_args
self.assertEqual(('https://user.com/webmention',), args)
self.assertEqual({
'source': 'https://ap.brid.gy/convert/web/http:/mas.to/like%23ok',
'target': 'https://user.com/post',
}, kwargs['data'])
self.assert_object('http://mas.to/like#ok',
notify=[self.user.key],
users=[self.masto_actor_key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(LIKE_WITH_ACTOR),
delivered=['https://user.com/post'],
type='like',
object_ids=[LIKE['object']])
def test_inbox_like_indirect_user_creates_User(self, mock_get, *_):
self.user.direct = False
self.user.put()
mock_get.return_value = self.as2_resp(LIKE_ACTOR)
self.test_inbox_like()
self.assert_user(ActivityPub, 'https://mas.to/actor',
obj_as2=LIKE_ACTOR, direct=True)
def test_inbox_follow_accept_with_id(self, *mocks):
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 200, *mocks)
follow = {
**FOLLOW_WITH_ACTOR,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
}
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(follow),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_with_object(self, *mocks):
follow = {
**FOLLOW,
'object': {
'id': FOLLOW['object'],
'url': FOLLOW['object'],
},
}
self._test_inbox_follow_accept(follow, ACCEPT, 200, *mocks)
follow.update({
'actor': ACTOR,
'url': 'https://mas.to/users/swentel#followed-https://user.com/',
})
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1(follow),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_shared_inbox(self, *mocks):
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 200, *mocks,
inbox_path='/ap/sharedInbox')
url = 'https://mas.to/users/swentel#followed-https://user.com/'
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='complete',
our_as1=as2.to_as1({**FOLLOW_WITH_ACTOR, 'url': url}),
delivered=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def test_inbox_follow_accept_webmention_fails(self, mock_head, mock_get,
mock_post):
mock_post.side_effect = [
requests_response(), # AP Accept
requests.ConnectionError(), # webmention
]
self._test_inbox_follow_accept(FOLLOW_WRAPPED, ACCEPT, 502,
mock_head, mock_get, mock_post)
url = 'https://mas.to/users/swentel#followed-https://user.com/'
self.assert_object('https://mas.to/6d1a',
users=[self.swentel_key],
notify=[self.user.key],
source_protocol='activitypub',
status='failed',
our_as1=as2.to_as1({**FOLLOW_WITH_ACTOR, 'url': url}),
delivered=[],
failed=['https://user.com/'],
type='follow',
object_ids=[FOLLOW['object']])
def _test_inbox_follow_accept(self, follow_as2, accept_as2, expected_status,
mock_head, mock_get, mock_post,
inbox_path='/user.com/inbox'):
        # this should make us mark the follower's ActivityPub user as direct=True
self.user.direct = False
self.user.put()
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
if not mock_post.return_value and not mock_post.side_effect:
mock_post.return_value = requests_response()
got = self.post(inbox_path, json=follow_as2)
self.assertEqual(expected_status, got.status_code)
mock_get.assert_has_calls((
self.as2_req(FOLLOW['actor']),
))
# check AP Accept
self.assertEqual(2, len(mock_post.call_args_list))
args, kwargs = mock_post.call_args_list[0]
self.assertEqual(('http://mas.to/inbox',), args)
self.assertEqual(accept_as2, json_loads(kwargs['data']))
# check webmention
args, kwargs = mock_post.call_args_list[1]
self.assertEqual(('https://user.com/webmention',), args)
self.assertEqual({
'source': 'https://ap.brid.gy/convert/web/https:/mas.to/6d1a',
'target': 'https://user.com/',
}, kwargs['data'])
# check that we stored Follower and ActivityPub user for the follower
self.assert_entities_equal(
Follower(to=self.user.key,
from_=ActivityPub(id=ACTOR['id']).key,
status='active',
follow=Object(id=FOLLOW['id']).key),
Follower.query().fetch(),
ignore=['created', 'updated'])
self.assert_user(ActivityPub, 'https://mas.to/users/swentel',
obj_as2=ACTOR, direct=True)
self.assert_user(Web, 'user.com', direct=False,
has_hcard=True, has_redirects=True)
def test_inbox_follow_use_instead_strip_www(self, mock_head, mock_get, mock_post):
self.make_user('www.user.com', use_instead=self.user.key)
mock_head.return_value = requests_response(url='https://www.user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(ACTOR),
# target post webmention discovery
requests_response('<html></html>'),
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
follower = Follower.query().get()
self.assert_entities_equal(
Follower(to=self.user.key,
from_=ActivityPub(id=ACTOR['id']).key,
status='active',
follow=Object(id=FOLLOW['id']).key),
follower,
ignore=['created', 'updated'])
# double check that Follower doesn't have www
self.assertEqual('user.com', follower.to.id())
        # double check that the Follower is active
        self.assertEqual('active', follower.status)
        # double check that the follow Object's url doesn't have www
        self.assertEqual('https://mas.to/users/swentel#followed-https://user.com/',
                         follower.follow.get().as2['url'])
def test_inbox_undo_follow(self, mock_head, mock_get, mock_post):
follower = Follower(to=self.user.key,
from_=ActivityPub.get_or_create(ACTOR['id']).key,
status='active')
follower.put()
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
# check that the Follower is now inactive
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_follow_inactive(self, mock_head, mock_get, mock_post):
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
# source actor
self.as2_resp(FOLLOW_WITH_ACTOR['actor']),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
# check that the Follower is now active
self.assertEqual('active', follower.key.get().status)
def test_inbox_undo_follow_doesnt_exist(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
def test_inbox_undo_follow_inactive(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
got = self.post('/user.com/inbox', json=UNDO_FOLLOW_WRAPPED)
self.assertEqual(200, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_undo_follow_composite_object(self, mock_head, mock_get, mock_post):
mock_head.return_value = requests_response(url='https://user.com/')
mock_get.side_effect = [
self.as2_resp(ACTOR),
WEBMENTION_DISCOVERY,
]
mock_post.return_value = requests_response()
follower = Follower.get_or_create(to=self.user,
from_=ActivityPub.get_or_create(ACTOR['id']),
status='inactive')
undo_follow = copy.deepcopy(UNDO_FOLLOW_WRAPPED)
undo_follow['object']['object'] = {'id': undo_follow['object']['object']}
got = self.post('/user.com/inbox', json=undo_follow)
self.assertEqual(200, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
def test_inbox_unsupported_type(self, *_):
got = self.post('/user.com/inbox', json={
'@context': ['https://www.w3.org/ns/activitystreams'],
'id': 'https://xoxo.zone/users/aaronpk#follows/40',
'type': 'Block',
'actor': 'https://xoxo.zone/users/aaronpk',
'object': 'http://snarfed.org/',
})
self.assertEqual(501, got.status_code)
def test_inbox_bad_object_url(self, mock_head, mock_get, mock_post):
# https://console.cloud.google.com/errors/detail/CMKn7tqbq-GIRA;time=P30D?project=bridgy-federated
mock_get.return_value = self.as2_resp(ACTOR) # source actor
id = 'https://mas.to/users/tmichellemoore#likes/56486252'
bad_url = 'http://localhost/r/Testing \u2013 Brid.gy \u2013 Post to Mastodon 3'
bad = {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': id,
'type': 'Like',
'actor': ACTOR['id'],
'object': bad_url,
}
got = self.post('/user.com/inbox', json=bad)
# bad object, should ignore activity
self.assertEqual(204, got.status_code)
mock_post.assert_not_called()
self.assert_object(id,
our_as1={
**as2.to_as1(bad),
'actor': as2.to_as1(ACTOR),
},
users=[self.swentel_key],
source_protocol='activitypub',
status='ignored',
)
self.assertIsNone(Object.get_by_id(bad_url))
@patch('activitypub.logger.info', side_effect=logging.info)
@patch('common.logger.info', side_effect=logging.info)
@patch('oauth_dropins.webutil.appengine_info.DEBUG', False)
def test_inbox_verify_http_signature(self, mock_common_log, mock_activitypub_log,
_, mock_get, ___):
# actor with a public key
self.key_id_obj.key.delete()
protocol.objects_cache.clear()
actor_as2 = {
**ACTOR,
'publicKey': {
'id': 'http://my/key/id#unused',
'owner': 'http://own/er',
'publicKeyPem': self.user.public_pem().decode(),
},
}
mock_get.return_value = self.as2_resp(actor_as2)
# valid signature
body = json_dumps(NOTE)
headers = self.sign('/ap/sharedInbox', json_dumps(NOTE))
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(204, resp.status_code, resp.get_data(as_text=True))
mock_get.assert_has_calls((
self.as2_req('http://my/key/id'),
))
mock_activitypub_log.assert_any_call('HTTP Signature verified!')
# valid signature, Object has no key
self.key_id_obj.as2 = ACTOR
self.key_id_obj.put()
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(401, resp.status_code, resp.get_data(as_text=True))
# valid signature, Object has our_as1 instead of as2
self.key_id_obj.clear()
self.key_id_obj.our_as1 = as2.to_as1(actor_as2)
self.key_id_obj.put()
resp = self.client.post('/ap/sharedInbox', data=body, headers=headers)
self.assertEqual(204, resp.status_code, resp.get_data(as_text=True))
mock_activitypub_log.assert_any_call('HTTP Signature verified!')
# invalid signature, missing keyId
protocol.seen_ids.clear()
obj_key = ndb.Key(Object, NOTE['id'])
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', data=body, headers={
**headers,
'signature': headers['signature'].replace(
'keyId="http://my/key/id#unused",', ''),
})
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'HTTP Signature missing keyId'}, resp.json)
mock_common_log.assert_any_call('Returning 401: HTTP Signature missing keyId', exc_info=None)
# invalid signature, content changed
protocol.seen_ids.clear()
obj_key = ndb.Key(Object, NOTE['id'])
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', json={**NOTE, 'content': 'z'}, headers=headers)
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'Invalid Digest header, required for HTTP Signature'},
resp.json)
mock_common_log.assert_any_call('Returning 401: Invalid Digest header, required for HTTP Signature', exc_info=None)
# invalid signature, header changed
protocol.seen_ids.clear()
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', data=body, headers={**headers, 'Date': 'X'})
self.assertEqual(401, resp.status_code)
self.assertEqual({'error': 'HTTP Signature verification failed'}, resp.json)
mock_common_log.assert_any_call('Returning 401: HTTP Signature verification failed', exc_info=None)
# no signature
protocol.seen_ids.clear()
obj_key.delete()
resp = self.client.post('/ap/sharedInbox', json=NOTE)
self.assertEqual(401, resp.status_code, resp.get_data(as_text=True))
self.assertEqual({'error': 'No HTTP Signature'}, resp.json)
mock_common_log.assert_any_call('Returning 401: No HTTP Signature', exc_info=None)
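    # For reference, a (hypothetical) Signature header in the draft-cavage
    # HTTP Signatures scheme these cases exercise looks roughly like:
    #   Signature: keyId="http://my/key/id#unused",algorithm="rsa-sha256",
    #       headers="(request-target) host date digest",signature="base64..."
    # so removing keyId, changing the body (and thus the Digest), or editing a
    # signed header like Date each invalidates it, matching the 401s above.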
def test_delete_actor(self, *mocks):
follower = Follower.get_or_create(
to=self.user, from_=ActivityPub.get_or_create(DELETE['actor']))
followee = Follower.get_or_create(
to=ActivityPub.get_or_create(DELETE['actor']),
from_=Fake.get_or_create('snarfed.org'))
# other unrelated follower
other = Follower.get_or_create(
to=self.user, from_=ActivityPub.get_or_create('https://mas.to/users/other'))
self.assertEqual(3, Follower.query().count())
got = self.post('/ap/sharedInbox', json=DELETE)
self.assertEqual(204, got.status_code)
self.assertEqual('inactive', follower.key.get().status)
self.assertEqual('inactive', followee.key.get().status)
self.assertEqual('active', other.key.get().status)
def test_delete_actor_not_fetchable(self, _, mock_get, ___):
self.key_id_obj.key.delete()
protocol.objects_cache.clear()
mock_get.return_value = requests_response(status=410)
got = self.post('/ap/sharedInbox', json={**DELETE, 'object': 'http://my/key/id'})
self.assertEqual(202, got.status_code)
def test_delete_actor_empty_deleted_object(self, _, mock_get, ___):
self.key_id_obj.as2 = None
self.key_id_obj.deleted = True
self.key_id_obj.put()
protocol.objects_cache.clear()
got = self.post('/ap/sharedInbox', json={**DELETE, 'object': 'http://my/key/id'})
self.assertEqual(202, got.status_code)
mock_get.assert_not_called()
def test_delete_note(self, _, mock_get, ___):
obj = Object(id='http://an/obj')
obj.put()
mock_get.side_effect = [
self.as2_resp(ACTOR),
]
delete = {
**DELETE,
'object': 'http://an/obj',
}
resp = self.post('/ap/sharedInbox', json=delete)
self.assertEqual(204, resp.status_code)
self.assertTrue(obj.key.get().deleted)
self.assert_object(delete['id'],
our_as1={
**as2.to_as1(delete),
'actor': as2.to_as1(ACTOR),
},
type='delete',
source_protocol='activitypub',
status='ignored',
users=[ActivityPub(id='https://mas.to/users/swentel').key])
obj.populate(deleted=True, as2=None)
self.assert_entities_equal(obj,
protocol.objects_cache['http://an/obj'],
ignore=['expire', 'created', 'updated'])
def test_update_note(self, *mocks):
Object(id='https://a/note', as2={}).put()
self._test_update(*mocks)
def test_update_unknown(self, *mocks):
self._test_update(*mocks)
def _test_update(self, _, mock_get, ___):
mock_get.side_effect = [
self.as2_resp(ACTOR),
]
resp = self.post('/ap/sharedInbox', json=UPDATE_NOTE)
self.assertEqual(204, resp.status_code)
note_as1 = as2.to_as1({
**UPDATE_NOTE['object'],
'author': {'id': 'https://mas.to/users/swentel'},
})
self.assert_object('https://a/note',
type='note',
our_as1=note_as1,
source_protocol='activitypub')
update_as1 = {
**as2.to_as1(UPDATE_NOTE),
'object': note_as1,
'actor': as2.to_as1(ACTOR),
}
self.assert_object(UPDATE_NOTE['id'],
source_protocol='activitypub',
type='update',
status='ignored',
our_as1=update_as1,
users=[self.swentel_key])
self.assert_entities_equal(Object.get_by_id('https://a/note'),
protocol.objects_cache['https://a/note'])
def test_inbox_webmention_discovery_connection_fails(self, mock_head,
mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
ReadTimeoutError(None, None, None),
]
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(502, got.status_code)
def test_inbox_no_webmention_endpoint(self, mock_head, mock_get, mock_post):
mock_get.side_effect = [
# source actor
self.as2_resp(LIKE_WITH_ACTOR['actor']),
# protocol inference
requests_response(test_web.NOTE_HTML),
requests_response(test_web.NOTE_HTML),
# target post webmention discovery
HTML,
]
got = self.post('/user.com/inbox', json=LIKE)
self.assertEqual(204, got.status_code)
self.assert_object('http://mas.to/like#ok',
notify=[self.user.key],
users=[self.masto_actor_key],
source_protocol='activitypub',
status='ignored',
our_as1=as2.to_as1(LIKE_WITH_ACTOR),
type='like',
object_ids=[LIKE['object']])
def test_inbox_id_already_seen(self, *mocks):
obj_key = Object(id=FOLLOW_WRAPPED['id'], as2={}).put()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
self.assertEqual(0, Follower.query().count())
# second time should use in memory cache
obj_key.delete()
got = self.post('/user.com/inbox', json=FOLLOW_WRAPPED)
self.assertEqual(204, got.status_code)
self.assertEqual(0, Follower.query().count())
def test_followers_collection_unknown_user(self, *_):
resp = self.client.get('/nope.com/followers')
self.assertEqual(404, resp.status_code)
def test_followers_collection_empty(self, *_):
resp = self.client.get('/user.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/followers',
'type': 'Collection',
'summary': "user.com's followers",
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'items': [],
},
}, resp.json)
def store_followers(self):
follow = Object(id=FOLLOW_WITH_ACTOR['id'], as2=FOLLOW_WITH_ACTOR).put()
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://bar', cls=ActivityPub, obj_as2=ACTOR),
follow=follow)
Follower.get_or_create(
to=self.make_user('https://other.actor', cls=ActivityPub),
from_=self.user)
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://baz', cls=ActivityPub, obj_as2=ACTOR),
follow=follow)
Follower.get_or_create(
to=self.user,
from_=self.make_user('http://baj', cls=Fake),
status='inactive')
def test_followers_collection_fake(self, *_):
self.make_user('foo.com', cls=Fake)
resp = self.client.get('/ap/fake/foo.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/ap/fake/foo.com/followers',
'type': 'Collection',
'summary': "foo.com's followers",
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/ap/fake/foo.com/followers',
'items': [],
},
}, resp.json)
def test_followers_collection(self, *_):
self.store_followers()
resp = self.client.get('/user.com/followers')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/followers',
'type': 'Collection',
'summary': "user.com's followers",
'totalItems': 2,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'items': [ACTOR, ACTOR],
},
}, resp.json)
@patch('models.PAGE_SIZE', 1)
def test_followers_collection_page(self, *_):
self.store_followers()
before = (datetime.utcnow() + timedelta(seconds=1)).isoformat()
next = Follower.query(Follower.from_ == ActivityPub(id='http://baz').key,
Follower.to == self.user.key,
).get().updated.isoformat()
resp = self.client.get(f'/user.com/followers?before={before}')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': f'http://localhost/user.com/followers?before={before}',
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/followers',
'next': f'http://localhost/user.com/followers?before={next}',
'prev': f'http://localhost/user.com/followers?after={before}',
'items': [ACTOR],
}, resp.json)
def test_following_collection_unknown_user(self, *_):
resp = self.client.get('/nope.com/following')
self.assertEqual(404, resp.status_code)
def test_following_collection_empty(self, *_):
resp = self.client.get('/user.com/following')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/following',
'summary': "user.com's following",
'type': 'Collection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'items': [],
},
}, resp.json)
def store_following(self):
follow = Object(id=FOLLOW_WITH_ACTOR['id'], as2=FOLLOW_WITH_ACTOR).put()
Follower.get_or_create(
to=self.make_user('http://bar', cls=ActivityPub, obj_as2=ACTOR),
from_=self.user,
follow=follow)
Follower.get_or_create(
to=self.user,
from_=self.make_user('https://other.actor', cls=ActivityPub))
Follower.get_or_create(
to=self.make_user('http://baz', cls=ActivityPub, obj_as2=ACTOR),
from_=self.user, follow=follow)
Follower.get_or_create(
to=self.make_user('http://baj', cls=ActivityPub),
from_=self.user,
status='inactive')
def test_following_collection(self, *_):
self.store_following()
resp = self.client.get('/user.com/following')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/following',
'summary': "user.com's following",
'type': 'Collection',
'totalItems': 2,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'items': [ACTOR, ACTOR],
},
}, resp.json)
@patch('models.PAGE_SIZE', 1)
def test_following_collection_page(self, *_):
self.store_following()
after = datetime(1900, 1, 1).isoformat()
prev = Follower.query(Follower.to == ActivityPub(id='http://baz').key,
Follower.from_ == self.user.key,
).get().updated.isoformat()
resp = self.client.get(f'/user.com/following?after={after}')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': f'http://localhost/user.com/following?after={after}',
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/following',
'prev': f'http://localhost/user.com/following?after={prev}',
'next': f'http://localhost/user.com/following?before={after}',
'items': [ACTOR],
}, resp.json)
def test_outbox_fake(self, *_):
self.make_user('foo.com', cls=Fake)
resp = self.client.get(f'/ap/fake/foo.com/outbox')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/ap/fake/foo.com/outbox',
'summary': "foo.com's outbox",
'type': 'OrderedCollection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/ap/fake/foo.com/outbox',
'items': [],
},
}, resp.json)
def test_outbox_web(self, *_):
resp = self.client.get(f'/user.com/outbox')
self.assertEqual(200, resp.status_code)
self.assertEqual({
'@context': 'https://www.w3.org/ns/activitystreams',
'id': 'http://localhost/user.com/outbox',
'summary': "user.com's outbox",
'type': 'OrderedCollection',
'totalItems': 0,
'first': {
'type': 'CollectionPage',
'partOf': 'http://localhost/user.com/outbox',
'items': [],
},
}, resp.json)
class ActivityPubUtilsTest(TestCase):
def setUp(self):
super().setUp()
g.user = self.make_user('user.com', has_hcard=True, obj_as2=ACTOR)
def test_put_validates_id(self, *_):
for bad in (
'',
'not a url',
'ftp://not.web/url',
'https:///no/domain',
'https://fed.brid.gy/foo',
'https://ap.brid.gy/foo',
'http://localhost/foo',
):
with self.assertRaises(AssertionError):
ActivityPub(id=bad).put()
def test_owns_id(self):
self.assertIsNone(ActivityPub.owns_id('http://foo'))
self.assertIsNone(ActivityPub.owns_id('https://bar/baz'))
self.assertFalse(ActivityPub.owns_id('at://did:plc:foo/bar/123'))
self.assertFalse(ActivityPub.owns_id('e45fab982'))
self.assertFalse(ActivityPub.owns_id('https://twitter.com/foo'))
self.assertFalse(ActivityPub.owns_id('https://fed.brid.gy/foo'))
def test_postprocess_as2_multiple_in_reply_tos(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'inReplyTo': 'foo',
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'inReplyTo': ['foo', 'bar'],
}))
def test_postprocess_as2_multiple_url(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'url': ['http://localhost/r/foo', 'http://localhost/r/bar'],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'url': ['foo', 'bar'],
}))
def test_postprocess_as2_multiple_image(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'attachment': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
'image': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'image': [{'url': 'http://r/foo'}, {'url': 'http://r/bar'}],
}))
def test_postprocess_as2_actor_attributedTo(self):
g.user = Fake(id='site')
self.assert_equals({
'actor': {
'id': 'baj',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
},
'attributedTo': [{
'id': 'bar',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
}, {
'id': 'baz',
'preferredUsername': 'site',
'url': 'http://localhost/r/site',
'inbox': 'http://bf/fake/site/ap/inbox',
'outbox': 'http://bf/fake/site/ap/outbox',
}],
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'attributedTo': [{'id': 'bar'}, {'id': 'baz'}],
'actor': {'id': 'baj'},
}))
def test_postprocess_as2_note(self):
self.assert_equals({
'id': 'http://localhost/r/xyz',
'type': 'Note',
'to': [as2.PUBLIC_AUDIENCE],
}, postprocess_as2({
'id': 'xyz',
'type': 'Note',
}))
def test_postprocess_as2_hashtag(self):
"""https://github.com/snarfed/bridgy-fed/issues/45"""
self.assert_equals({
'tag': [
{'type': 'Hashtag', 'name': '#bar', 'href': 'bar'},
{'type': 'Hashtag', 'name': '#baz', 'href': 'http://localhost/hashtag/baz'},
{'type': 'Mention', 'href': 'foo'},
],
'to': ['https://www.w3.org/ns/activitystreams#Public'],
}, postprocess_as2({
'tag': [
{'name': 'bar', 'href': 'bar'},
{'type': 'Tag', 'name': '#baz'},
# should leave alone
{'type': 'Mention', 'href': 'foo'},
],
}))
def test_postprocess_as2_url_attachments(self):
got = postprocess_as2(as2.from_as1({
'objectType': 'person',
'urls': [
{
'value': 'https://user.com/about-me',
'displayName': 'Mrs. \u2615 Foo',
}, {
'value': 'https://user.com/',
'displayName': 'should be ignored',
}, {
'value': 'http://one',
'displayName': 'one text',
}, {
'value': 'https://two',
'displayName': 'two title',
},
]
}))
self.assert_equals([{
'type': 'PropertyValue',
'name': 'Mrs. ☕ Foo',
'value': '<a rel="me" href="https://user.com/about-me"><span class="invisible">https://</span>user.com/about-me<span class="invisible"></span></a>',
}, {
'type': 'PropertyValue',
'name': 'Web site',
'value': '<a rel="me" href="https://user.com/"><span class="invisible">https://</span>user.com<span class="invisible">/</span></a>',
}, {
'type': 'PropertyValue',
'name': 'one text',
'value': '<a rel="me" href="http://one"><span class="invisible">http://</span>one<span class="invisible"></span></a>',
}, {
'type': 'PropertyValue',
'name': 'two title',
'value': '<a rel="me" href="https://two"><span class="invisible">https://</span>two<span class="invisible"></span></a>',
}], got['attachment'])
def test_postprocess_as2_preserves_preferredUsername(self):
        # preferredUsername stays the domain, user.com, regardless of the
        # user's stated username, since Mastodon queries Webfinger for
        # [email protected]
# https://github.com/snarfed/bridgy-fed/issues/77#issuecomment-949955109
self.assertEqual('user.com', postprocess_as2({
'type': 'Person',
'url': 'https://user.com/about-me',
'preferredUsername': 'nick',
'attachment': [{
'type': 'PropertyValue',
'name': 'nick',
'value': '<a rel="me" href="https://user.com/about-me"><span class="invisible">https://</span>user.com/about-me<span class="invisible"></span></a>',
}],
})['preferredUsername'])
# TODO: make these generic and use Fake
@patch('requests.get')
def test_load_http(self, mock_get):
mock_get.return_value = AS2
id = 'http://the/id'
self.assertIsNone(Object.get_by_id(id))
# first time fetches over HTTP
got = ActivityPub.load(id)
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
mock_get.assert_has_calls([self.as2_req(id)])
# second time is in cache
got.key.delete()
mock_get.reset_mock()
got = ActivityPub.load(id)
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_datastore(self, mock_get):
id = 'http://the/id'
stored = Object(id=id, as2=AS2_OBJ)
stored.put()
protocol.objects_cache.clear()
# first time loads from datastore
got = ActivityPub.load(id)
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
# second time is in cache
stored.key.delete()
got = ActivityPub.load(id)
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_preserves_fragment(self, mock_get):
stored = Object(id='http://the/id#frag', as2=AS2_OBJ)
stored.put()
protocol.objects_cache.clear()
got = ActivityPub.load('http://the/id#frag')
self.assert_entities_equal(stored, got)
mock_get.assert_not_called()
@patch('requests.get')
def test_load_datastore_no_as2(self, mock_get):
"""If the stored Object has no as2, we should fall back to HTTP."""
id = 'http://the/id'
stored = Object(id=id, as2={}, status='in progress')
stored.put()
protocol.objects_cache.clear()
mock_get.return_value = AS2
got = ActivityPub.load(id)
mock_get.assert_has_calls([self.as2_req(id)])
self.assert_equals(id, got.key.id())
self.assert_equals(AS2_OBJ, got.as2)
self.assert_object(id,
as2=AS2_OBJ,
as1={**AS2_OBJ, 'id': id},
source_protocol='activitypub',
# check that it reused our original Object
status='in progress')
@patch('requests.get')
def test_signed_get_redirects_manually_with_new_sig_headers(self, mock_get):
mock_get.side_effect = [
requests_response(status=302, redirected_url='http://second',
allow_redirects=False),
requests_response(status=200, allow_redirects=False),
]
activitypub.signed_get('https://first')
first = mock_get.call_args_list[0][1]
second = mock_get.call_args_list[1][1]
self.assertNotEqual(first['headers'], second['headers'])
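    # The headers differ here presumably because signed_get re-signs each
    # request and the signed Host header changes across the cross-host
    # redirect; the same-host case below keeps them identical.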
@patch('requests.get')
def test_signed_get_redirects_to_relative_url(self, mock_get):
mock_get.side_effect = [
# redirected URL is relative, we have to resolve it
requests_response(status=302, redirected_url='/second',
allow_redirects=False),
requests_response(status=200, allow_redirects=False),
]
activitypub.signed_get('https://first')
self.assertEqual(('https://first/second',), mock_get.call_args_list[1][0])
first = mock_get.call_args_list[0][1]
second = mock_get.call_args_list[1][1]
# headers are equal because host is the same
self.assertEqual(first['headers'], second['headers'])
self.assertEqual(
first['auth'].header_signer.sign(first['headers'], method='GET', path='/'),
second['auth'].header_signer.sign(second['headers'], method='GET', path='/'))
@patch('requests.post')
def test_signed_post_ignores_redirect(self, mock_post):
mock_post.side_effect = [
requests_response(status=302, redirected_url='http://second',
allow_redirects=False),
]
resp = activitypub.signed_post('https://first')
mock_post.assert_called_once()
self.assertEqual(302, resp.status_code)
@patch('requests.get')
def test_fetch_direct(self, mock_get):
mock_get.return_value = AS2
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
))
@patch('requests.get')
def test_fetch_direct_ld_content_type(self, mock_get):
mock_get.return_value = requests_response(AS2_OBJ, headers={
'Content-Type': 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"',
})
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
))
@patch('requests.get')
def test_fetch_via_html(self, mock_get):
mock_get.side_effect = [HTML_WITH_AS2, AS2]
obj = Object(id='http://orig')
ActivityPub.fetch(obj)
self.assertEqual(AS2_OBJ, obj.as2)
mock_get.assert_has_calls((
self.as2_req('http://orig'),
self.as2_req('http://as2', headers=as2.CONNEG_HEADERS),
))
@patch('requests.get')
def test_fetch_only_html(self, mock_get):
mock_get.return_value = HTML
obj = Object(id='http://orig')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@patch('requests.get')
def test_fetch_not_acceptable(self, mock_get):
mock_get.return_value = NOT_ACCEPTABLE
obj = Object(id='http://orig')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@patch('requests.get')
def test_fetch_ssl_error(self, mock_get):
mock_get.side_effect = requests.exceptions.SSLError
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://orig'))
@patch('requests.get')
def test_fetch_no_content(self, mock_get):
mock_get.return_value = self.as2_resp('')
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://the/id'))
mock_get.assert_has_calls([self.as2_req('http://the/id')])
@patch('requests.get')
def test_fetch_not_json(self, mock_get):
mock_get.return_value = self.as2_resp('XYZ not JSON')
with self.assertRaises(BadGateway):
ActivityPub.fetch(Object(id='http://the/id'))
mock_get.assert_has_calls([self.as2_req('http://the/id')])
def test_fetch_non_url(self):
obj = Object(id='x y z')
self.assertFalse(ActivityPub.fetch(obj))
self.assertIsNone(obj.as1)
@skip
def test_serve(self):
obj = Object(id='http://orig', as2=LIKE)
self.assertEqual((LIKE_WRAPPED, {'Content-Type': 'application/activity+json'}),
ActivityPub.serve(obj))
def test_postprocess_as2_idempotent(self):
g.user = self.make_user('foo.com')
for obj in (ACTOR, REPLY_OBJECT, REPLY_OBJECT_WRAPPED, REPLY,
NOTE_OBJECT, NOTE, MENTION_OBJECT, MENTION, LIKE,
LIKE_WRAPPED, REPOST, FOLLOW, FOLLOW_WRAPPED, ACCEPT,
UNDO_FOLLOW_WRAPPED, DELETE, UPDATE_NOTE,
# TODO: these currently fail
# LIKE_WITH_ACTOR, REPOST_FULL, FOLLOW_WITH_ACTOR,
# FOLLOW_WRAPPED_WITH_ACTOR, FOLLOW_WITH_OBJECT, UPDATE_PERSON,
):
with self.subTest(obj=obj):
obj = copy.deepcopy(obj)
self.assert_equals(postprocess_as2(obj),
postprocess_as2(postprocess_as2(obj)),
ignore=['to'])
def test_ap_address(self):
user = ActivityPub(obj=Object(id='a', as2={**ACTOR, 'preferredUsername': 'me'}))
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
user.obj.as2 = ACTOR
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
user = ActivityPub(id='https://mas.to/users/alice')
self.assertEqual('@[email protected]', user.ap_address())
self.assertEqual('@[email protected]', user.readable_id)
def test_ap_actor(self):
user = self.make_user('http://foo/actor', cls=ActivityPub)
self.assertEqual('http://foo/actor', user.ap_actor())
def test_web_url(self):
user = self.make_user('http://foo/actor', cls=ActivityPub)
self.assertEqual('http://foo/actor', user.web_url())
user.obj = Object(id='a', as2=copy.deepcopy(ACTOR)) # no url
self.assertEqual('http://foo/actor', user.web_url())
user.obj.as2['url'] = ['http://my/url']
self.assertEqual('http://my/url', user.web_url())
def test_readable_id(self):
user = self.make_user('http://foo', cls=ActivityPub)
self.assertIsNone(user.readable_id)
self.assertEqual('http://foo', user.readable_or_key_id())
user.obj = Object(id='a', as2=ACTOR)
self.assertEqual('@[email protected]', user.readable_id)
self.assertEqual('@[email protected]', user.readable_or_key_id())
@skip
def test_target_for_not_activitypub(self):
with self.assertRaises(AssertionError):
ActivityPub.target_for(Object(source_protocol='web'))
def test_target_for_actor(self):
self.assertEqual(ACTOR['inbox'], ActivityPub.target_for(
Object(source_protocol='ap', as2=ACTOR)))
actor = copy.deepcopy(ACTOR)
del actor['inbox']
self.assertIsNone(ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
actor['publicInbox'] = 'so-public'
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
# sharedInbox
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor), shared=True))
actor['endpoints'] = {
'sharedInbox': 'so-shared',
}
self.assertEqual('so-public', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor)))
self.assertEqual('so-shared', ActivityPub.target_for(
Object(source_protocol='ap', as2=actor), shared=True))
def test_target_for_object(self):
obj = Object(as2=NOTE_OBJECT, source_protocol='ap')
self.assertIsNone(ActivityPub.target_for(obj))
Object(id=ACTOR['id'], as2=ACTOR).put()
obj.as2 = {
**NOTE_OBJECT,
'author': ACTOR['id'],
}
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
del obj.as2['author']
obj.as2['actor'] = copy.deepcopy(ACTOR)
obj.as2['actor']['url'] = [obj.as2['actor'].pop('id')]
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
@patch('requests.get')
def test_target_for_object_fetch(self, mock_get):
mock_get.return_value = self.as2_resp(ACTOR)
obj = Object(as2={
**NOTE_OBJECT,
'author': 'http://the/author',
}, source_protocol='ap')
self.assertEqual('http://mas.to/inbox', ActivityPub.target_for(obj))
mock_get.assert_has_calls([self.as2_req('http://the/author')])
@patch('requests.get')
def test_target_for_author_is_object_id(self, mock_get):
obj = self.store_object(id='http://the/author', our_as1={
'author': 'http://the/author',
})
# test is that we short circuit out instead of infinite recursion
self.assertIsNone(ActivityPub.target_for(obj))
@patch('requests.post')
def test_send_blocklisted(self, mock_post):
self.assertFalse(ActivityPub.send(Object(as2=NOTE),
'https://fed.brid.gy/ap/sharedInbox'))
mock_post.assert_not_called()
|
snarfed/bridgy-fed
|
tests/test_activitypub.py
|
test_activitypub.py
|
py
| 76,984 |
python
|
en
|
code
| 219 |
github-code
|
6
|
16908443054
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys
import os
from os import path
import numpy as np
from PIL import Image
import datetime
import matplotlib.pyplot as plt
import time
sys.path.append(os.getcwd() + "/lib/wordcloud")
from wordcloud import WordCloud
text = "初鳩,初花,初針,初春,初日,初日影,初飛行,初披講,初日の出,初雲雀,初富士,初冬,初風呂,初箒,初蛍,初参り,初松風,初神籤,初彌撒,春小袖," \
"春炬燵,春寒,春雨,春芝居,春近し,春隣,春の馬,春の海,春の蚊,春の風,春の川,春の草,春の雲,春の暮,春の蟬,春の空,春の田,春の月,春の虹," \
"春の野,春の日,春の服,春,2,3,春の星,はるのほし,春,2,3,春の水,はるのみず,春,2,3,春の山,はるのやま,春,2,3,春の闇,はるのやみ,春," \
"2,3,春の雪,はるのゆき,春,2,3,春の夜,はるのよ,春,2,春の宵,はるのよい,2,3,春日傘,はるひがさ,春,2,3,春深し,はるふかし,春,2,3,春待つ," \
"はるまつ,冬,2,春祭,はるまつり,春,2,春めく,はるめく,春,2,春休み,はるやすみ,春,2,春夕焼,はるゆうやけ,春,2,バレンタインデー," \
"ばれんたいんでー,春,晩夏,ばんか,夏,晩菊,ばんぎく,秋,万愚節,ばんぐせつ,春,半夏,はんげ,夏,半夏生,はんげしょう,夏,パンジー,ぱんじー," \
"春,晩秋,ばんしゅう,秋,晩春,ばんしゅん,春,半仙戯,はんせんぎ,春,晩霜,ばんそう,春,斑猫,はんみょう,夏,晩涼,ばんりょう,夏,万緑," \
"ばんりょく,夏,日脚伸ぶ,ひあしのぶ,ビーチパラソル,びーちぱらそる,夏,ひひな,ひいな,春,柊,ひいらぎ,冬,麦酒,びーる,夏".encode('utf-8')
d = path.dirname(__file__)
start_day = datetime.date.today()
logo_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/logo_mask.png")))
spring_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/spring-mask.png")))
newyear_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/newyear-mask.png")))
summer_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/summer-mask.png")))
autumn_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/autumn-mask.png")))
winter_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/winter-mask.png")))
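# Note: in the wordcloud library, pure white (255 / #FF) pixels in a mask are
# treated as "masked out", so words are only drawn on the non-white areas of
# these PNGs.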
with open("./text/total_wordcloud.txt") as total_f:
    total_text = total_f.read()
with open("./text/weekly_wordcloud.txt") as weekly_f:
    weekly_text = weekly_f.read()
with open("./text/monthly_wordcloud.txt") as monthly_f:
    monthly_text = monthly_f.read()
with open("./text/newyear_wordcloud.txt") as newyear_f:
    newyear_text = newyear_f.read()
with open("./text/spring_wordcloud.txt") as spring_f:
    spring_text = spring_f.read()
with open("./text/summer_wordcloud.txt") as summer_f:
    summer_text = summer_f.read()
with open("./text/autumn_wordcloud.txt") as autumn_f:
    autumn_text = autumn_f.read()
with open("./text/winter_wordcloud.txt") as winter_f:
    winter_text = winter_f.read()
while True:
today = datetime.date.today()
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(total_text)
wordcloud.to_file("./image/total_wordcloud.png")
print("total_wordcloud was generated.")
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(text.decode('utf-8'))
wordcloud.to_file("./image/weekly_wordcloud.png")
print("weekly_wordcloud was generated.")
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(text.decode('utf-8'))
wordcloud.to_file("./image/monthly_wordcloud.png")
print("monthly_wordcloud was generated.")
wordcloud = WordCloud(background_color="moccasin", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=newyear_mask, width=600, height=600, colormap="Reds").generate(text.decode('utf-8'))
wordcloud.to_file("./image/newyear_wordcloud.png")
print("newyear_wordcloud was generated.")
wordcloud = WordCloud(background_color="palegreen", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=spring_mask, width=600, height=600, colormap="spring").generate(text.decode('utf-8'))
wordcloud.to_file("./image/spring_wordcloud.png")
print("spring_wordcloud was generated.")
wordcloud = WordCloud(background_color="paleturquoise", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=summer_mask, width=600, height=600, colormap="summer").generate(text.decode('utf-8'))
wordcloud.to_file("./image/summer_wordcloud.png")
print("summer_wordcloud was generated.")
wordcloud = WordCloud(background_color="darkslategray", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=autumn_mask, width=600, height=600, colormap="autumn").generate(text.decode('utf-8'))
wordcloud.to_file("./image/autumn_wordcloud.png")
print("autumn_wordcloud was generated.")
wordcloud = WordCloud(background_color="midnightblue", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=winter_mask, width=600, height=600, colormap="PuBuGn").generate(text.decode('utf-8'))
wordcloud.to_file("./image/winter_wordcloud.png")
print("winter_wordcloud was generated.")
time.sleep(86400)
|
PL2GroupJ/PyWordCloud
|
wc.py
|
wc.py
|
py
| 5,758 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35941373968
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Filters results from taxonomic classifiers and extracts taxonomic IDs for filtered hits"""
"""To do: Add argparse and logging"""
import os
import sys
import subprocess
import pandas as pd
def main():
# Input
classifier = sys.argv[1]
params = sys.argv[2]
hits_report = sys.argv[3]
ntc = None
if len(sys.argv) > 4:
ntc = sys.argv[4]
reference_summary_refseq = "ftp://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_refseq.txt"
reference_summary_genbank = "ftp://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_genbank.txt"
#reference_summary_refseq = "rsync://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_refseq.txt"
#reference_summary_genbank = "rsync://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_genbank.txt"
# Get thresholds for filtering
min_reads = ""
min_reads_w_ntc = ""
max_reads_ntc = ""
min_ratio = ""
min_reads_ratio = ""
try:
params = params.strip().split(",")
if len(params) == 1:
min_reads = int(params[0])
elif len(params) == 4:
min_reads_w_ntc = int(params[0])
max_reads_ntc = int(params[1])
min_ratio = float(params[2])
min_reads_ratio = int(params[3])
else:
print("No filter parameters specified")
sys.exit(1)
except Exception as e:
print("Failed parsing filtering options\n" + str(e))
sys.exit(1)
print("min_reads", min_reads)
print("min_reads_w_ntc", min_reads_w_ntc)
print("max_reads_ntc", max_reads_ntc)
print("min_ratio", min_ratio)
print("min_reads_ratio", min_reads_ratio)
# Output
summary_filtered = "{}_{}_filtered_taxonomic_hits.tsv".format(classifier,hits_report)
summary_skipped = "{}_{}_skipped_taxonomic_hits.tsv".format(classifier,hits_report)
# Parse hits
all_hits = get_all_hits(hits_report, classifier)
if ntc:
ntc_hits = get_all_hits(ntc, classifier)
# Filter hits
filtered_hits = {}
skipped_hits = {}
for taxid, data in all_hits.items():
reads = parse_reads(data)
# Filter based on ntc sample
if ntc:
# Get number of reads for ntc
if taxid in ntc_hits.keys():
ntc_data = ntc_hits[taxid]
ntc_reads = parse_reads(ntc_data)
else:
ntc_reads = 0
# Filter based on difference in read counts between sample and ntc
if (reads > min_reads_w_ntc) and (ntc_reads < max_reads_ntc):
add_taxid(all_hits, taxid, filtered_hits, skipped_hits)
            # check reads > min_reads_ratio first, so we never divide by zero
            elif (reads > min_reads_ratio) and ((ntc_reads / reads) < min_ratio):
add_taxid(all_hits, taxid, filtered_hits, skipped_hits)
# Filter based on number of reads
else:
if reads > min_reads:
add_taxid(all_hits, taxid, filtered_hits, skipped_hits)
sorted_hits = sorted(list(filtered_hits.items()), key=lambda k: k[1][0], reverse=True)
print("Found {} species in the sample".format(len(filtered_hits)))
for hit in filtered_hits:
print (hit, filtered_hits[hit])
# Download refseq assembly summary file from ncbi
print("Downloading NCBI refseq reference summary file")
download_ftp(reference_summary_refseq)
# Get reference ids for filtered taxids
taxid_accesion_map = {}
# Search refseq for references
missing_acc_refseq = parse_accession(reference_summary_refseq, filtered_hits.keys(), taxid_accesion_map)
if len(missing_acc_refseq) > 0:
# Download genbank assembly summary file from ncbi
print("Downloading NCBI genbank reference summary file")
download_ftp(reference_summary_genbank)
# Search genbank for references that are not found in refseq
missing_acc = parse_accession(reference_summary_genbank, missing_acc_refseq, taxid_accesion_map)
if len(missing_acc) > 0:
print("Failed to identify accession number of reference for taxons: {}".format(", ".join(missing_acc)))
# Create summary files
with open(summary_filtered, "w") as out:
out.write("taxonomic_id\treads\tspecies\taccession_id\n")
for taxid, info in sorted_hits:
out.write("\t".join([taxid, info[0], info[1], taxid_accesion_map[str(taxid)]]) + "\n")
with open(summary_skipped, "w") as out:
out.write("taxonomic_id\tspecies\n")
for taxid, name in skipped_hits.items():
out.write(str(taxid) + "\t" + name + "\n")
def get_all_hits(results, classifier):
"""Parse all hits in classifier report for species level"""
all_hits = {}
with open(results, 'r') as f:
res = f.readlines()[1:]
for hit in res:
hit = hit.strip().split("\t")
if classifier == "kaiju":
all_hits[hit[3]] = [hit[2], hit[4]]
elif classifier in ["kraken2", "centrifuge"]:
reads = hit[1]
level = hit[3]
            name = hit[5].strip()
if level == "S":
taxid = hit[4]
all_hits[taxid] = [reads, name]
elif level.startswith("S") or level == "-":
if name not in ["root","cellular organisms","Opisthokonta","Eumetazoa","Bilateria","Deuterostomia","Craniata","Vertebrata","Gnathostomata","Teleostomi","Euteleostomi","Sarcopterygii"," Eutheria","Dipnotetrapodomorpha","Tetrapoda","Amniota","Theria","Eutheria","Boreoeutheria","Euarchontoglires","Haplorrhini","Simiiformes","Catarrhini","Hominoidea","Homininae","Pseudomonas fluorescens group"]:
all_hits[taxid].append([reads, name, hit[4]])
else:
print("No classifier specified. Supported classifiers are kraken2, centrifuge and kaiju.")
sys.exit(1)
return all_hits
def parse_reads(data):
reads = int(data[0])
return reads
def add_taxid(all_hits, taxid, output, skipped):
"""Add taxonomic ID of species or one of the strains of the species to the output"""
data = all_hits[taxid]
# Check if substrain of taxon should be used
if len(data) > 2:
# Compare reads with substrains for species
reads = parse_reads(data)
for sub in data[2:]:
if int(sub[0])/reads > 0.75:
taxid = sub[2]
data = sub
name = data[1]
# Filter out unwanted species
if "phage" in name.lower() or taxid == "9606" or "group" in name.lower() or "unclassified" in name.lower():
skipped[taxid] = name
return
#elif contaminant
output[taxid] = [data[0], name]
def download_ftp(path):
"""Download file"""
try:
command = "wget {}".format(path)
proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, error = proc.communicate()
print("output", output)
print("error", error)
except Exception as e:
print("Could not download file: {}\n".format(path) + e)
sys.exit(1)
def parse_accession(referencefile, taxids, taxid_accesion_map):
"""Parses NCBI summary file and chooses one reference accession number for each species in input list, returns list of missing taxons"""
missing_acc = []
with open(os.path.basename(referencefile), "r") as f:
references = pd.read_csv(f, sep='\t', skiprows=1, index_col=0, dtype=str)
for taxid in taxids:
refs = references.copy()
refs = refs.loc[refs["taxid"] == taxid]
if len(refs) == 1:
acc = refs.index.tolist()[0]
elif "reference genome" in list(refs["refseq_category"]):
acc = refs.index[refs["refseq_category"] == "reference genome"].tolist()[0]
elif "representative genome" in list(refs["refseq_category"]):
acc = refs.index[refs["refseq_category"] == "representative genome"].tolist()[0]
elif "Complete Genome" in list(refs["assembly_level"]):
acc = refs.index[refs["assembly_level"] == "Complete Genome"].tolist()[0]
elif len(refs) > 0:
acc = refs.index.tolist()[0]
else:
acc = None
missing_acc.append(taxid)
# Add identified references
taxid_accesion_map[taxid] = acc
return missing_acc
main()
|
Clinical-Genomics/Metoid
|
bin/extractReferences.py
|
extractReferences.py
|
py
| 8,493 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10422627143
|
from __future__ import annotations
import asyncio
import dataclasses
import logging
import uuid
from typing import TYPE_CHECKING, Self
from PySide6.QtCore import QObject, Signal
from randovania.bitpacking.json_dataclass import JsonDataclass
from randovania.interface_common.players_configuration import INVALID_UUID
from randovania.lib import json_lib, migration_lib
if TYPE_CHECKING:
from collections.abc import Iterable
from pathlib import Path
_MIGRATIONS = [
# lambda data: data,
]
CURRENT_VERSION = migration_lib.get_version(_MIGRATIONS)
def migrate_to_current(data: dict):
return migration_lib.apply_migrations(data, _MIGRATIONS, copy_before_migrating=True)
@dataclasses.dataclass(frozen=True)
class WorldServerData(JsonDataclass):
world_name: str
session_id: int
session_name: str
def _combine_tuples(existing: tuple[int, ...], new_indices: Iterable[int]) -> tuple[int, ...]:
new = set(existing)
for it in new_indices:
new.add(it)
return tuple(sorted(new))
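# Example: _combine_tuples((1, 3), [2, 3]) -> (1, 2, 3); duplicates collapse
# and the result stays sorted, keeping WorldData's location tuples canonical.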
@dataclasses.dataclass(frozen=True)
class WorldData(JsonDataclass):
collected_locations: tuple[int, ...] = ()
uploaded_locations: tuple[int, ...] = ()
latest_message_displayed: int = 0
server_data: WorldServerData | None = None
def extend_collected_location(self, new_indices: Iterable[int]) -> Self:
return dataclasses.replace(
self,
collected_locations=_combine_tuples(self.collected_locations, new_indices),
)
def extend_uploaded_locations(self, new_indices: Iterable[int]) -> Self:
return dataclasses.replace(
self,
uploaded_locations=_combine_tuples(self.uploaded_locations, new_indices),
)
class WorldDatabase(QObject):
_all_data: dict[uuid.UUID, WorldData]
_persist_path: Path
WorldDataUpdate = Signal()
def __init__(self, persist_path: Path):
super().__init__()
self.logger = logging.getLogger(__name__)
persist_path.mkdir(parents=True, exist_ok=True)
self._persist_path = persist_path
self.logger.info("Using %s as database path", persist_path)
self._all_data = {}
self._lock = asyncio.Lock()
async def _read_data(self, uid: uuid.UUID) -> WorldData:
raw_data = await json_lib.read_path_async(self._persist_path.joinpath(f"{uid}.json"))
return WorldData.from_json(migrate_to_current(raw_data)["data"])
async def _write_data(self, uid: uuid.UUID, data: WorldData):
json_lib.write_path(
self._persist_path.joinpath(f"{uid}.json"),
{
"schema_version": CURRENT_VERSION,
"data": data.as_json,
},
)
async def load_existing_data(self):
for f in self._persist_path.glob("*.json"):
try:
uid = uuid.UUID(f.stem)
except ValueError:
self.logger.warning("File name is not a UUID: %s", f)
continue
if uid != INVALID_UUID:
self._all_data[uid] = await self._read_data(uid)
def get_data_for(self, uid: uuid.UUID) -> WorldData:
if uid == INVALID_UUID:
raise ValueError("UID not allowed for Multiworld")
if uid not in self._all_data:
self._all_data[uid] = WorldData()
return self._all_data[uid]
async def set_data_for(self, uid: uuid.UUID, data: WorldData):
await self.set_many_data({uid: data})
async def set_many_data(self, new_data: dict[uuid.UUID, WorldData]):
async with self._lock:
for uid, data in new_data.items():
if data != self._all_data.get(uid):
self._all_data[uid] = data
await self._write_data(uid, data)
self.WorldDataUpdate.emit()
def get_locations_to_upload(self, uid: uuid.UUID) -> tuple[int, ...]:
data = self.get_data_for(uid)
return tuple(i for i in sorted(data.collected_locations) if i not in data.uploaded_locations)
def all_known_data(self) -> Iterable[uuid.UUID]:
yield from self._all_data.keys()
|
randovania/randovania
|
randovania/interface_common/world_database.py
|
world_database.py
|
py
| 4,141 |
python
|
en
|
code
| 165 |
github-code
|
6
|
5672514853
|
def get_config():
import os
from board_game.states.gomoku_state import GomokuState
from .gomoku_dqn import GomokuDqnModel
board_shape = (9, 9)
target = 5
print('board_shape:', board_shape)
print('target:', target)
state = GomokuState(board_shape = board_shape, target = target)
model_path = 'gomoku_dqn_model_{0}_{1}_{2}'.format(board_shape[0], board_shape[1], target)
model_path = os.path.join(os.path.dirname(__file__), model_path)
model = GomokuDqnModel(board_shape = state.board_shape, action_dim = state.get_action_dim(), model_path = model_path)
config = {
'model_path' : model_path,
'replay_memory_size' : 64 * 1024,
'discount' : 0.95,
'batch_size' : 16,
'epoch_num' : 4,
'learning_rate' : 0.001,
'episode_num' : 2000000,
}
return state, model, config
def main():
from . import dqn_train
dqn_train.main('gomoku', get_config)
|
lbingbing/machine-learning-board-game.old
|
board_game/players/dqn/gomoku_dqn_train.py
|
gomoku_dqn_train.py
|
py
| 960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20824483233
|
import inflect
def main():
p = inflect.engine()
names = []
while True:
try:
name = input("Name: ")
names.append(name)
        except (EOFError, KeyboardInterrupt):
names = p.join(names)
print("Adieu, adieu, to " + names)
quit()
main()
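# Example: for inputs Liesl, Friedrich, Louisa, inflect's join yields
# "Liesl, Friedrich, and Louisa", so this prints
# "Adieu, adieu, to Liesl, Friedrich, and Louisa".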
|
lauriwesterlund/CS50P
|
Solutions/adieu.py
|
adieu.py
|
py
| 341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38259209826
|
import cv2
#Configurable Parameters
inputValue = int(input("Enter the scale value to resize the image: (0 - 100): "))
if inputValue >= 0 and inputValue <= 100:
source = "wx.jpg"
destination = 'newImage.png'
scale_percent = inputValue
src = cv2.imread(source, cv2.IMREAD_UNCHANGED)
#cv2.imshow("title", src)
    # Percentage by which the image is resized
    # Calculate scale_percent of the original dimensions
new_width = int(src.shape[1] * scale_percent / 100)
new_height = int(src.shape[0] * scale_percent / 100)
dsize = (new_width, new_height)
output = cv2.resize(src, dsize)
cv2.imwrite(destination, output)
#cv2.waitKey(0)
else:
print("Enter correct values")
|
sundaram-sharma/image-resizer-python
|
main.py
|
main.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22378324072
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .serializers import PersonSerializer
from .models import Person
from rest_framework import status
from rest_framework.permissions import IsAdminUser
from rest_framework.decorators import api_view, permission_classes
@api_view(['GET','POST'])
def home(request):
if request.method == 'POST':
name = request.data['name']
return Response({'name':f'my name is {name}'})
else:
return Response({'name':'sina'})
@api_view()
def persons(request):
person = Person.objects.all()
ser_data = PersonSerializer(person,many=True)
return Response(ser_data.data,status=status.HTTP_200_OK)
@api_view()
@permission_classes([IsAdminUser])
def person(request,name):
try:
person = Person.objects.get(name=name)
except Person.DoesNotExist:
return Response({'error':'this user does not exist'},status=status.HTTP_404_NOT_FOUND)
ser_data = PersonSerializer(person)
return Response(ser_data.data,status=status.HTTP_200_OK)
# create_user
@api_view(['POST'])
def person_create(request):
info=PersonSerializer(data=request.data)
if info.is_valid():
info.save()
return Response({'message':'ok'},status=status.HTTP_201_CREATED)
else:
return Response(info.errors,status=status.HTTP_400_BAD_REQUEST)
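# Example request flow (hypothetical URL routing, not shown in this module):
#   POST /person/create/ with {"name": "sina"}  -> 201 {"message": "ok"}
#   GET  /person/sina/ as an admin              -> 200 serialized Person
#   GET  /person/missing/ as an admin           -> 404 {"error": "this user does not exist"}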
|
sinajamshidi247/django_rest_framework
|
A/home/views.py
|
views.py
|
py
| 1,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
301052917
|
import yaml
import glob
import dropbox
import os
import sys
import time, threading
import RPi.GPIO as GPIO
import time
import pygame
import sentry_sdk
from sentry_sdk import start_transaction
def loadConfig(file):
with open(file, 'r') as stream:
config_dict = yaml.safe_load(stream)
return config_dict
def clearCache(path):
files = glob.glob(path + '/*')
for f in files:
os.remove(f)
def fetchAndCacheSoundtrack(dropboxAccessToken, toPath, fromPath):
with start_transaction(op="task", name="fetchAndCacheSoundtrack"):
with dropbox.Dropbox(dropboxAccessToken) as dbx:
# List available fiels
files = dbx.files_list_folder(path='/' + fromPath, include_non_downloadable_files=False)
if len(files.entries) <= 0:
raise Exception('No files found')
# Select the last file in the folder
fileToFetch = files.entries[-1]
print(fileToFetch)
_, res = dbx.files_download(path=fileToFetch.path_lower)
# Cache the fetched file
_, extension = os.path.splitext(fileToFetch.name)
cachedFilePath = toPath + '/' + fromPath + '_music' + extension
with open(cachedFilePath, 'wb') as f:
f.write(res.content)
print('Soundtrack cached', cachedFilePath)
def configureGPIPTrigger(gpio_pin, cb):
GPIO.setup(gpio_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(gpio_pin, GPIO.RISING, callback=cb, bouncetime=500)
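# RPi.GPIO passes the triggering channel number to the callback, which is how
# play_music below learns whether the up or down pin fired.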
config = loadConfig('config.yaml')
print('Config loaded')
sentry_sdk.init(
dsn=config['sentry'],
environment=config['sentry_env'],
ignore_errors=[KeyboardInterrupt],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0
)
cachePath = config['cache_path']
clearCache(cachePath)
try:
fetchAndCacheSoundtrack(config['dropbox_access_token'], cachePath, 'up')
except Exception:
print('No up file found')
try:
fetchAndCacheSoundtrack(config['dropbox_access_token'], cachePath, 'down')
except Exception:
print('No down file found')
cachedUpFiles = glob.glob(cachePath + '/up_*')
cachedDownFiles = glob.glob(cachePath + '/down_*')
music_file_up = None
music_file_down = None
if len(cachedUpFiles) > 0:
music_file_up = cachedUpFiles[-1]
if len(cachedDownFiles) > 0:
music_file_down = cachedDownFiles[-1]
print('Ready using cached soundtrack up/down', music_file_up, music_file_down)
# Configure GPIO
pin_up = config['pi_signal_gpio_up']
pin_down = config['pi_signal_gpio_down']
pin_check_interval = config['pi_signal_interval_ms']
# Configure pygame mixer
pygame.mixer.init()
pygame.mixer.music.set_volume(1.0)
fade_ms = 1000
max_music_play_seconds = int(config['soundtrack_play_seconds'])
def stop_music():
print("Fading out music for", fade_ms, "ms")
pygame.mixer.music.fadeout(fade_ms)
pygame.mixer.music.unload()
def play_music(gpio_trigger):
print("Play music for trigger", gpio_trigger)
is_music_playing = pygame.mixer.music.get_busy()
if is_music_playing:
print("Music already playing")
return
    is_pin_up = gpio_trigger == pin_up
    selected_music = music_file_up if is_pin_up else music_file_down
    if selected_music is None:
        print('No music to play')
        return
print("Playing music for", max_music_play_seconds, "seconds", selected_music)
pygame.mixer.music.load(selected_music)
pygame.mixer.music.play(fade_ms=fade_ms)
threading.Timer(max_music_play_seconds, stop_music).start()
GPIO.setmode(GPIO.BCM)
configureGPIOTrigger(pin_up, play_music)
configureGPIOTrigger(pin_down, play_music)
print('Listening to signal on GPIO pins', pin_up, pin_down)
try:
running = True
while running:
time.sleep(1)
except:
print("quitting")
pygame.quit()
GPIO.cleanup()
sys.exit()
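# --- Hedged sketch: shape of the config.yaml this script loads. The keys are
# exactly the ones read above; every value is an illustrative assumption. ---
# sentry: "https://examplePublicKey@o0.ingest.sentry.io/0"
# sentry_env: "production"
# cache_path: "/home/pi/soundtrack-cache"
# dropbox_access_token: "<access token>"
# pi_signal_gpio_up: 17
# pi_signal_gpio_down: 27
# pi_signal_interval_ms: 100
# soundtrack_play_seconds: 30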
|
soundtecas/elevator
|
elevator.py
|
elevator.py
|
py
| 3,991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34338376065
|
# -*- coding: utf-8 -*-
import contextlib
import json
import logging
import re
import starlette_werkzeug_debugger
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import JSONResponse
from starlette.routing import Route
from starlette.testclient import TestClient
def inner_error():
local_var = 'inner'
raise RuntimeError("Raised error")
async def raise_error(request):
local_var = 'outer'
inner_error()
async def ok_response(request):
return JSONResponse("ok")
@contextlib.asynccontextmanager
async def lifespan(app):
# only to test scope["type"]
yield
def build_app(**kwargs):
middleware = [
Middleware(starlette_werkzeug_debugger.WerkzeugDebugMiddleware, **kwargs)
]
return Starlette(debug=True, middleware=middleware, lifespan=lifespan, routes=[
Route('/', raise_error),
Route('/ok/', ok_response),
])
def get_middleware(app):
return app.middleware_stack.app
def test_correct_response():
app = build_app()
with TestClient(app) as client:
response = client.get('/ok/')
assert response.status_code == 200
assert response.content == b'"ok"'
def test_error_response():
app = build_app()
client = TestClient(app)
response = client.get('/')
assert response.status_code == 500
assert b"Werkzeug Debugger" in response.content
def test_serve_static():
app = build_app()
client = TestClient(app)
client.get('/')
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'resource', 'f': 'style.css'})
assert response.status_code == 200
assert response.headers['content-type'].startswith('text/css')
def test_printpin(caplog):
caplog.set_level(logging.INFO)
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
client.get('/')
middleware = get_middleware(app)
middleware.pin = '4852'
    # a wrong secret must not reveal the PIN
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'printpin', 's': middleware.secret + 'x'})
assert middleware.pin not in caplog.text
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'printpin', 's': middleware.secret})
assert middleware.pin in caplog.text
def test_pinauth():
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
client.get('/')
middleware = get_middleware(app)
middleware.pin = '4852'
# wrong secret
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret + 'x'})
assert response.status_code == 500
# wrong pin
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin + '5', 's': middleware.secret})
assert response.status_code == 200
response_content = json.loads(response.content.decode('utf-8'))
assert not response_content['auth']
# correct pin
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret})
assert response.status_code == 200
response_content = json.loads(response.content.decode('utf-8'))
assert response_content['auth']
assert middleware.pin_cookie_name in response.cookies
def test_console():
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
exception_content = client.get('/').content.decode('utf-8')
middleware = get_middleware(app)
middleware.pin = '4852'
# login
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret})
cookies = response.cookies
frame_ids = re.findall(r'(?:frame-(\d+))', exception_content)
# content from inner variable
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'local_var', 'frm': frame_ids[-1], 's': middleware.secret})
assert b'inner' in response.content
# content from outer variable
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'local_var', 'frm': frame_ids[-2], 's': middleware.secret})
assert b'outer' in response.content
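# --- Hedged usage sketch: running this suite (assumes pytest and the package
# under test are installed in the environment) ---
# pip install pytest starlette starlette-werkzeug-debugger
# pytest tests/test_debugger.py -v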
|
mireq/starlette-werkzeug-debugger
|
tests/test_debugger.py
|
test_debugger.py
|
py
| 4,009 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11313840170
|
import numpy as np
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import string
import tqdm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
class preprocessing:
    # dimension is the dimensionality of the embedding vectors
    # embedder is a fasttext object from the gensim library
def __init__(self,dimension,embedder):
self.dimension=dimension
self.embedder=embedder
    # Counts the occurrences of each word
    # Input: a list of lists of tokens
    # Output: a dictionary mapping each word to its occurrence count
def word_count(self,sentences):
counts = dict()
for sentence in sentences:
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Filters the data by word frequency, word length (characters), and sentence length (words)
    # Words occurring fewer than `frequency` times are dropped
    # Words shorter than `word_length` characters are dropped
    # Sentences with fewer than `N_words` words are dropped
    # Output: a list of lists of tokens (features) and a list of strings (labels)
def getFilteredData(self,product_title,labels,frequency, N_words, word_length):
result=self.word_count(product_title)
new_product_title=[]
for sentence in tqdm.tqdm(product_title):
new_product_title.append([word for word in sentence if result[word]>=frequency and len(word)>=word_length])
new_features=[]
new_labels=[]
for index,title in tqdm.tqdm(enumerate(new_product_title)):
if(len(title)>=N_words):
new_features.append(title)
new_labels.append(labels[index])
return new_features,new_labels
    # Produces Tf-Idf features from the input
    # Input: a list of lists of tokens
    # Output: a tf-idf matrix (list of lists of reals), a CountVectorizer object, and a TfidfTransformer object
def getTfIdf(self,new_product_title):
concatenated_product_title=[]
for sentence in tqdm.tqdm(new_product_title):
concatenated_product_title.append(" ".join(sentence))
cv=CountVectorizer()
result=cv.fit_transform(concatenated_product_title)
tftransformer = TfidfTransformer(smooth_idf=False)
final_result=tftransformer.fit_transform(result)
return final_result,cv,tftransformer
    # Removes digits and punctuation, then tokenizes into words
    # Input: a string
    # Output: a list of tokens
def tokenize(self,input_string):
input_string=''.join(i for i in input_string if not i.isdigit())
result_string=input_string.lower()
target_punctuations=string.punctuation
for punctuation in target_punctuations:
result_string=result_string.replace(punctuation, ' ')
result_string=result_string.strip(' ').split()
return result_string
    # Converts a word into its fasttext vector
    # Input: a token
    # Output: a vector with dimension self.dimension
def vectorize_word(self,product_title):
try:
result=self.embedder[product_title]
except KeyError:
result=0
return result
    # Converts a sentence (list of tokens) into a fasttext vector
    # Word vectors are combined into a sentence vector by summation
    # Each word vector can be weighted by the word's tf-idf value
    # doc_occ is a dictionary mapping each word to the number of documents it occurs in
    # total_doc is the total number of documents
    # For more background on doc_occ and total_doc, see the tf-idf formula
    def vectorize_sentence(self,input_sentence,doc_occ=None,total_doc=None):
        N_success=0
        result_vector=np.zeros(self.dimension)
        ll=len(input_sentence)
        for word in input_sentence:
            # term frequency of this word within the sentence
            c=0
            for word2 in input_sentence:
                if(word==word2):
                    c+=1
            if(doc_occ is not None and word in doc_occ):
                result_vector+=(self.vectorize_word(word)*((c/ll)*(np.log(total_doc/doc_occ[word]))))
            else:
                result_vector+=(self.vectorize_word(word))
            if(np.sum(self.vectorize_word(word))!=0):
                N_success+=1
        # discard sentences with fewer than two embeddable words
        if(N_success<2):
            result_vector=np.zeros(self.dimension)
        return result_vector
    # Pipeline method that turns raw data into data ready for classification
    # Input: a list of strings (features), a list of strings (labels), and an optional custom LabelEncoder object
    # Output: a pandas dataframe (features and labels combined) with column name "Labels" for the labels,
    # numeric columns 0..dimension-1 for the features, and a LabelEncoder object (when no custom encoder is supplied)
def preprocess_data(self,features,labels,encoder=None):
embedded_data=pd.DataFrame()
print("TOKENIZE DATA")
embedded_data["Features"]=[self.tokenize(title) for title in tqdm.tqdm(features)]
print("APPLYING FILTER")
nf,nl=self.getFilteredData(embedded_data["Features"],list(labels),50,2,3)
embedded_data=pd.DataFrame()
embedded_data["Features"]=nf
voc=set()
for sentence in tqdm.tqdm(embedded_data["Features"]):
for word in sentence:
voc.add(word)
total_doc=len(embedded_data["Features"])
doc_occ={}
for element in tqdm.tqdm(list(voc)):
count_occ=0
for sentence in embedded_data["Features"]:
if (element in sentence):
count_occ+=1
doc_occ[element]=count_occ
print("ENCODING LABELS")
if(encoder==None):
label_encoder=LabelEncoder()
embedded_data["Labels"]=label_encoder.fit_transform(nl)
else:
label_encoder=encoder
embedded_data["Labels"]=label_encoder.transform(nl)
print("CONVERTING SENTENCE TO VECTOR")
embedded_data["Features Vector"]=[self.vectorize_sentence(title,doc_occ,total_doc) for title in tqdm.tqdm(embedded_data["Features"])]
print("SAVE VECTOR TO PANDAS DATAFRAME")
for i in tqdm.tqdm(range(self.dimension)):
embedded_data[i]=[value[i] for value in embedded_data["Features Vector"]]
embedded_data = embedded_data[[*range(self.dimension),"Labels"]]
if(encoder==None):
return embedded_data, label_encoder
else:
return embedded_data
    # Input: two lists of strings and the desired number of top classes N
    # Output: data in the same format as the input, restricted to the top N classes
def getFilteredClasses(self,product_title,labels,top_N):
print("1/3")
sorted_by_value = sorted(self.class_count(labels).items(), key=lambda kv: kv[1])
valid_class=[value[0] for value in sorted_by_value[-top_N:]]
print("2/3")
product_title=list(product_title)
new_features=[]
new_labels=[]
for index,label in tqdm.tqdm(enumerate(labels)):
if(label in valid_class):
new_labels.append(label)
new_features.append(product_title[index])
return new_features,new_labels
    # Computes the Tf-Idf value of a word
    # Input: the real values needed to compute Tf-Idf
    # Output: the Tf-Idf value
def tfidf_word(self,total_occ,total_words,doc_occ,total_doc):
return (total_occ/total_words)*np.log(total_doc/doc_occ)
    # Produces Tf-Idf features from the input, restricted to the words present in vocab
    # Input: a list of lists of tokens and a list of strings (the vocab)
    # Output: a tf-idf matrix (list of lists of reals), a CountVectorizer object, and a TfidfTransformer object
def getTfIdfCustom(self,new_product_title,vocab):
print("1/3")
concatenated_product_title=[]
for sentence in tqdm.tqdm(new_product_title):
concatenated_product_title.append(" ".join(sentence))
print("2/3")
cv=CountVectorizer(vocabulary=vocab)
result=cv.fit_transform(concatenated_product_title)
print("3/3")
tftransformer = TfidfTransformer(smooth_idf=False)
final_result=tftransformer.fit_transform(result)
return final_result,cv,tftransformer
    # Counts the occurrence frequency of each word in a list of lists of tokens
    # Output: a dictionary of words and their occurrence counts
def word_count(self,sentences):
counts = dict()
print("1/1")
for sentence in sentences:
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Counts the occurrence frequency of each word in a list of tokens
    # Output: a dictionary of words and their occurrence counts
def class_count(self,words):
counts = dict()
for word in words:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Same as word_count, but only counts data belonging to the target class
def word_count_label(self,sentences,labels,target):
counts = dict()
print("1/1")
for index,sentence in enumerate(sentences):
if(labels[index]==target):
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
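# --- Hedged usage sketch (names, file paths, and data are illustrative
# assumptions; real corpora are needed because the frequency filter in
# preprocess_data drops rare words) ---
# from gensim.models import KeyedVectors
# embedder = KeyedVectors.load("fasttext_id_100d.kv")  # hypothetical 100-d fastText vectors
# pipeline = preprocessing(dimension=100, embedder=embedder)
# titles = ["Sepatu running pria ukuran 42", "Kemeja batik lengan panjang"]
# labels = ["Shoes", "Shirts"]
# data, encoder = pipeline.preprocess_data(titles, labels)
# X, y = data[list(range(100))], data["Labels"]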
|
dryantl/product-title-classification
|
preprocessing_pipeline.py
|
preprocessing_pipeline.py
|
py
| 10,629 |
python
|
id
|
code
| 0 |
github-code
|
6
|
11066099241
|
import os
import pandas as pd
import glob
root_dir = 'Data/MeasureBoundingBoxAnnotations'
directories = os.listdir(root_dir)
all_csv_files = []
for directory in directories:
file_path = root_dir + '/' + directory + '/'
all_csv_files += [i for i in glob.glob(file_path + '*.csv')]
combined_csv = pd.concat([pd.read_csv(f) for f in all_csv_files])
combined_csv.to_csv('all_data.csv', index=False)
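# --- Hedged sketch: the directory layout this script assumes (illustrative) ---
# Data/MeasureBoundingBoxAnnotations/<subdir>/<file>.csv   (one CSV per annotation file)
# All per-directory CSVs are concatenated row-wise into all_data.csv.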
|
greyfertich/PDF-Player
|
merge_csv_files.py
|
merge_csv_files.py
|
py
| 409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38610058452
|
import subprocess
import re
import skia
import io
from platform import system
from pathlib import Path
from PIL import Image
from xml.etree import ElementTree as ET
from typing import Any, Tuple, Union, List
class SVG:
"""SVG class to load, edit, render, and export svg files using pillow and inkscape."""
if system() == "Darwin":
__SYSTEM_DPI = 72
else:
__SYSTEM_DPI = 96
    __WIDTH_HEIGHT_REGEX = r"^[1-9]\d*(px|)$|^[1-9]\d*\.?\d*(v(h|w)|\%)$"
__ICO_SIZES = [16, 32, 48, 64, 256]
__RESAMPLING_FILTERS = {
"nearest": 0,
"lanczos": 1,
"bilinear": 2,
"bicubic": 3,
"box": 4,
"hamming": 5,
"rerender": 6
}
def __init__(self, fp: Union[str, Path, bytes]):
"""Create an SVG instance from a svg file.
Args:
fp (str | Path | bytes): The svg filepath.
"""
# Check if filepath is valid
self.fp: Path = Path(fp).resolve()
if not self.fp.exists():
raise FileNotFoundError(f"SVG file '{fp}' does not exist.")
# Load SVG xml file
self.root: ET.Element = ET.parse(fp).getroot()
def __str_to_nu(self, input: str) -> Tuple[float, str]:
"""Extracts size number and units from string.
Args:
input (str): String in the form of '{number}{units}'. Accepts strings containing integers and floats.
Returns:
Tuple[float, str]: The number and unit values.
"""
        unit = re.findall(r"(|mm|px|in|vw|\%)$", input)[0]
        num = float(re.findall(r"^([1-9]\d*\.?\d*|[1-9]\d*)", input)[0])
return num, unit
def __to_px(self, num: float, unit: str, view: Tuple[float, float] = None) -> int:
"""Converts a number of unit types mm, cm, in, vw, vh, and % to pixels.
Args:
number (float): The number to convert to pixels
unit (str): The unit of the number for conversion. Currently supports mm, cm, in, vw, vh, and %.
view (Tuple[float, float]): The svg view box dimensions for units of vw, vh, and %.
Raises:
ValueError: View box is not provided for units of vw, vh, and %.
Returns:
int: The number converted to pixel units.
"""
        if unit == 'mm':
            num *= (__class__.__SYSTEM_DPI / 25.4)
        elif unit == 'cm':
            num *= (__class__.__SYSTEM_DPI / 2.54)
        elif unit == 'in':
            num *= __class__.__SYSTEM_DPI
        elif unit in ["%", "vw", "vh"]:
if not view:
raise ValueError(f"View box is necessary for conversions involving {unit}'s")
sw, sh = view
if unit == "vh":
num *= sh / 100.
else:
num *= sw / 100.
return num
@property
def size(self) -> Tuple[int, int]:
"""The size of the svg file in pixels. Defaults to (300, 150).
"""
viewBox = tuple(float(i) for i in self.root.attrib['viewBox'].split(
)) if 'viewBox' in self.root.attrib else (0, 0, 300, 150)
width = self.root.attrib['width'] if 'width' in self.root.attrib else "100vw"
height = self.root.attrib['height'] if 'height' in self.root.attrib else "100vh"
sw, sh = float(viewBox[2] - viewBox[0]), float(viewBox[3] - viewBox[1])
_, uw = self.__str_to_nu(width)
_, uh = self.__str_to_nu(height)
if uw in ["mm", "in", "cm"]:
sw = self.__to_px(sw, uw)
if uh in ["mm", "in", "cm"]:
sh = self.__to_px(sh, uh)
return int(sw), int(sh)
@property
def viewBox(self) -> Tuple[int, int, int, int]:
"""The viewBox of the svg file. Defaults to '0 0 300 150'."""
if 'viewBox' not in self.root.attrib:
self.root.attrib['viewBox'] = "0 0 300 150"
return tuple(int(i) for i in self.root.attrib['viewBox'].split())
@viewBox.setter
def viewBox(self, value: Tuple[int, int, int, int]):
"""Setter for viewBox."""
print(" ".join(str(v) for v in value))
self.root.attrib['viewBox'] = " ".join(str(v) for v in value)
@property
def width(self) -> str:
"""The width of the svg. Defaults to 100vw."""
return self.__get_attrib('width', '100vw')
@width.setter
def width(self, value: str) -> str:
"""Setter for width."""
self.__set_attrib('width', value, __class__.__WIDTH_HEIGHT_REGEX)
@property
def height(self) -> str:
"""The width of the svg. Defaults to 100vh."""
return self.__get_attrib('height', '100vh')
@height.setter
def height(self, value: str) -> str:
"""Setter for height."""
self.__set_attrib('height', value, __class__.__WIDTH_HEIGHT_REGEX)
def __set_attrib(self, attrib: str, value: str, regex: str = None):
"""Helper function for setting string attributes in the XML tree.
Args:
attrib (str): the target attribute.
value (str): the value to set.
regex (str | None, optional): A regex str for value checking. Defaults to None.
Raises:
ValueError: Value does not satisfy the regex condition.
"""
if regex and not re.findall(regex, value):
raise ValueError(
f"Invalid value for svg attribute {attrib}:", value)
self.root.attrib[attrib] = value
def __get_attrib(self, attrib: str, default: Any = None) -> Any:
"""Helper function for getting an svg attribute from the XML tree.
Args:
attrib (str): the attribute to return.
default (Any, optional): The default value of the attribute if it does not exist.
Returns:
Any: The attribute value. Will return None if attribute does not exist and no default value was specified.
"""
        if attrib not in self.root.attrib:
            if default:
                self.root.attrib[attrib] = default
            else:
                return None
        return self.root.attrib[attrib]
def __calc_sizes(self, dpi: List[int] = None, sizes: List[Union[int, Tuple[int, int]]] = None) -> List[Tuple[int, int]]:
"""Helper function to calculate the sizes of all images being rendered. Converts DPI values in pixel dimension.
Args:
dpi (List[int], optional): DPI of the images being rendered. Defaults to None.
sizes (List[Union[int, Tuple[int, int]]] | None, optional): Sizes of the images being rendered. Defaults to None.
Returns:
List[Tuple[int, int]]: A list of sizes (int pairs) of the images being rendered.
"""
sw, sh = self.size
if not dpi and not sizes:
values = [(sw, sh)]
else:
values = []
if dpi:
values.extend(
[(round(sw * (i / __class__.__SYSTEM_DPI)), round(sh * (i / __class__.__SYSTEM_DPI))) for i in dpi])
if sizes:
values.extend([i if isinstance(i, tuple) else (
i, round(i * sh/sw)) for i in sizes])
return values
def __max_size(self, sizes: List[Tuple[int, int]]) -> Tuple[int, int]:
"""Helper function to determine the largest image size to render such that all other sizes are a down scaling of it.
Args:
sizes (List[Tuple[int, int]]): The sizes of the images being rendered.
Returns:
Tuple[int, int]: A size (int pair) representing the largest necessary image to render.
"""
sw, sh = self.size
max_width, max_height = (max(i) for i in zip(*sizes))
multi = max(max_width / sw, max_height / sh)
return round(sw * multi), round(sh * multi)
def __im_skia(self, size: Tuple[int, int]) -> Image.Image:
"""Helper function to render a single PIL.Image object using skia-python.
Args:
size (Union[int, Tuple[int, int]], optional): Size of the rendered image.
Returns:
Image.Image: An instance of PIL.Image.Image.
"""
path = Path(self.fp).resolve()
skia_stream = skia.Stream.MakeFromFile(str(path))
skia_svg = skia.SVGDOM.MakeFromStream(skia_stream)
w, h = skia_svg.containerSize()
sw, sh = size
surface = skia.Surface(round(sw), round(sh))
with surface as canvas:
canvas.scale(round(sw) / w, round(sh) / h)
skia_svg.render(canvas)
with io.BytesIO(surface.makeImageSnapshot().encodeToData()) as f:
img = Image.open(f)
img.load()
return img
def __im_inkscape(self, size: Tuple[int, int], margin: int = None, area: str = 'page') -> Image.Image:
"""Helper function to render a single PIL.Image object using inkscape.
Args:
size (Union[int, Tuple[int, int]], optional): Size of the rendered image.
margin (int, optional): Margins on the rendered image. Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
Returns:
Image.Image: An instance of PIL.Image.Image.
"""
path = Path(self.fp).resolve()
options = ["inkscape", "--export-filename=-", "--export-type=png"]
if area.lower() == 'page':
options.extend(["--export-area-page"])
elif area.lower() == 'drawing':
options.extend(["--export-area-drawing"])
else:
options.extend([f"--export-area={area}"])
sw, sh = size
if size:
options.extend([f"--export-width={sw}", f"--export-height={sh}"])
if margin:
options.extend([f"--export-margin={margin}"])
if not path.exists():
return None
else:
options.extend([f"{path}"])
try:
pipe = subprocess.Popen(options, stdout=subprocess.PIPE)
except FileNotFoundError:
raise FileNotFoundError("Please make sure inkscape is installed and has been added to the PATH")
pipe.stdout.readline()
pipe.stdout.readline()
return Image.open(pipe.stdout)
def __im(self, size: Tuple[int, int], margin: int = None, area: str = 'page', renderer: str = 'skia') -> Image.Image:
"""Helper function to choose proper renderer. Throws an error if the renderer is not supported.
"""
if renderer == 'skia':
return self.__im_skia(size)
elif renderer == 'inkscape':
return self.__im_inkscape(size, margin, area)
else:
raise ValueError(
"Invalid renderer. Only supported renderers are 'skia' and 'inkscape'")
def __im_multi(self, sizes: List[Tuple[int, int]], margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia") -> List[Image.Image]:
"""Helper function to generate images of multiple specified sizes.
Args:
sizes (List[Union[int, Tuple[int, int]]], optional): Sizes of the images to render.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
Raises:
ValueError: Filter is invalid.
Returns:
List[Image.Image]: A list of PIL.Image.Image instances of different sizes.
"""
if filter not in __class__.__RESAMPLING_FILTERS:
raise ValueError(
f"Invalid filter: {filter}\nValid filters are: {' '.join(__class__.__RESAMPLING_FILTERS.keys())}")
if filter == "rerender":
return list(self.__im(size=size, margin=margin, area=area, renderer=renderer) for size in sizes)
else:
img = self.__im(size=self.__max_size(sizes), margin=margin, area=area, renderer=renderer)
return list(img.resize(size, __class__.__RESAMPLING_FILTERS[filter.lower()]) for size in sizes)
    @staticmethod
    def __export(img: Image.Image, stem: str = None, format: Union[str, List[str]] = "png"):
"""Helper function to export a PIL.Image.Image instance to another image format.
Args:
img (Image.Image): The image to export.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
Raises:
FileNotFoundError: The target directory for exporting does not exist.
"""
if isinstance(format, str):
format = [format]
parent = Path(stem).resolve().parent
if not parent.is_dir():
raise FileNotFoundError(
f"Could not locate the directory: {parent}\nPlease make sure the directory exists")
for f in format:
if f == "ico":
img.save(f"{stem}.{f}", sizes=[
(i, i) for i in __class__.__ICO_SIZES if i < img.width and i < img.height])
continue
try:
img.save(f"{stem}.{f}")
except OSError:
img.convert("RGB").save(f"{stem}.{f}")
    @staticmethod
    def __export_multi(img: List[Image.Image], stem: str = None, format: Union[str, List[str]] = "png"):
"""Helper function to export multiple images in different formats.
Args:
img (List[Image.Image]): A list of images to export.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
"""
for i in img:
__class__.__export(i, f"{stem}_{i.size[0]}_{i.size[1]}", format)
def im(self, dpi: Union[int, List[int]] = None, size: List[Union[int, Tuple[int, int]]] = None, margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia") -> Union[Image.Image, List[Image.Image]]:
"""
        Render the SVG as a PIL.Image instance. The default rendering size is the one provided by the SVG file.
Args:
dpi (int | List[int], optional): The DPI(s) to render the image(s) at.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
Returns:
            Union[Image.Image, List[Image.Image]]: A single rendered image, or a list of images when several sizes are requested.
"""
if isinstance(dpi, int):
dpi = [dpi]
if isinstance(size, int):
size = [size]
size = self.__calc_sizes(dpi, size)
if len(size) > 1:
return self.__im_multi(size, margin, area, filter, renderer=renderer)
else:
return self.__im(size[0], margin, area, renderer=renderer)
def save(self, fp: Union[str, Path, bytes] = None):
"""Saves the SVG XML tree.
Args:
fp (str | Path | bytes, optional): The save path. If no path is specified, this will overwrite the original SVG. Defaults to None.
"""
if fp is None:
fp = self.fp
else:
fp = Path(fp).resolve()
ET.ElementTree(self.root).write(fp)
def export(self, stem: str = None, format: Union[str, List[str]] = "png", dpi: Union[int, List[int]] = None, size: List[Union[int, Tuple[int, int]]] = None, margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia"):
"""Renders and exports image(s) of specified size(s) as specified format(s).
Args:
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
dpi (int | List[int], optional): The DPI(s) to render the image(s) at. Defaults to 96.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
"""
if not stem:
stem = self.fp.stem
img = self.im(dpi, size, margin, area, filter, renderer=renderer)
if isinstance(img, list) and len(img) > 1:
__class__.__export_multi(img, stem, format)
elif isinstance(img, list):
__class__.__export(img[0], stem, format)
else:
__class__.__export(img, stem, format)
@classmethod
def IM(cls, fp: Union[str, Path, bytes], dpi: Union[int, List[int]] = None, size: Union[int, Tuple[int, int]] = None, margin: int = None, area: str = 'page', renderer: str = 'skia'):
"""Classmethod that returns a PIL.Image instance of a specified SVG. Useful if you do not need to create a class object.
Args:
fp (str | Path | bytes): The path of the svg file.
dpi (int, optional): DPI of the rendered image. Defaults to 96.
size (Union[int, Tuple[int, int]], optional): Size of the rendered image. Defaults to None.
margin (int, optional): Margins on the rendered image. Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
        Returns:
            Union[Image.Image, List[Image.Image]]: The rendered image(s).
"""
        return cls(fp).im(dpi, size, margin, area, renderer=renderer)
@classmethod
def EXPORT(cls, fp: Union[str, Path, bytes], stem: str = None, format: Union[str, List[str]] = "png", dpi: Union[int, List[int]] = None, size: Union[int, Tuple[int, int]] = None, margin: int = None, area: str = 'page', filter="lanczos", renderer: str = "skia"):
"""Classmethod that renders an SVG and exports image(s) of specified size(s) as specified format(s). Useful if you do not need to create an SVG class object.
Args:
fp (str | Path | bytes): The path of the svg file.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
dpi (int | List[int], optional): The DPI(s) to render the image(s) at. Defaults to 96.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
"""
cls(fp).export(stem, format, dpi, size, margin, area, filter, renderer)
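# --- Hedged usage sketch (file names are illustrative assumptions) ---
# svg = SVG("logo.svg")
# print(svg.size)                                  # pixel size from width/height/viewBox
# img = svg.im(size=[(256, 256)])                  # render one 256x256 PIL image via skia
# svg.export(stem="logo", format=["png", "ico"], size=[64, 256])
# SVG.EXPORT("logo.svg", format="png", dpi=192)    # one-shot classmethod, no instance kept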
|
jlwoolf/pillow-svg
|
PILSVG/SVG.py
|
SVG.py
|
py
| 20,691 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29771382768
|
import pandas as pd
import re
# an .xls file is an Excel workbook, so read it with read_excel rather than read_csv
df = pd.read_excel('C:\\Users\\NAVEEN\\Documents\\school.xls')
for i in range(len(df)):
    # strip every non-alphanumeric character from the first-column value
    print(re.sub('[^A-Za-z0-9]+', '', str(df.iloc[i, 0])))
print(df)
res = pd.DataFrame()
|
engineerscodes/PyVisionHUB
|
DATASET.py
|
DATASET.py
|
py
| 208 |
python
|
en
|
code
| 4 |
github-code
|
6
|
24255674964
|
from selenium import webdriver
import csv
import config
import time
class instaInfo:
def __init__(self):
"""
init webdriver
"""
self.driver = webdriver.Chrome('chromedriver')
self.profile_url = ''
self.followers_count = 0
self.ask_url()
def ask_url(self):
"""
get Instagram profile url
"""
self.profile_url = input("Enter Instagram profile link: ")
if self.profile_url[:26] != 'https://www.instagram.com/':
print('Link must be like \'https://www.instagram.com/user_name/\'')
            return self.ask_url()
def login_to_instagram(self):
"""
connect and login to Instagram
"""
try:
# connect to Instagram login page
self.driver.get('https://www.instagram.com/accounts/login/')
except Exception as e:
exit(f"Can't connect to: 'https://www.instagram.com/accounts/login/'\nError:{e}")
time.sleep(2)
try:
# input login and password
self.driver.find_element_by_name('username').send_keys(config.INSTAGRAM_LOGIN)
self.driver.find_element_by_name('password').send_keys(config.INSTAGRAM_PASSWORD)
# click to login button
self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF').click()
except Exception as e:
exit(f"Can't login!\nError:{e}")
time.sleep(3)
try:
# click save data button
self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF').click()
except Exception as e:
exit(f"Can't click !\nError:{e}")
time.sleep(3)
print("Logged in Instagram")
def connect_to_profile(self):
"""
connect to Instagram profile
"""
try:
self.driver.get(self.profile_url)
except Exception as e:
exit(f"Can't connect to: {self.profile_url}\nError:{e}")
time.sleep(3)
print(f"Connected to profile: {self.profile_url}")
def get_followers_count(self):
"""
parse count of followers
"""
try:
self.followers_count = self.driver.find_elements_by_css_selector('span.g47SY')[1].get_attribute('title')
# replace blank and convert to int type
self.followers_count = int(self.followers_count.replace(' ', ''))
except Exception as e:
exit(f"Can't get followers count: {self.profile_url}\nError:{e}")
print(f"{self.profile_url} count of followers: {self.followers_count}")
def get_profile_followers(self):
"""
get followers info
"""
# click to followers button
self.driver.find_element_by_css_selector('a.-nal3').click()
time.sleep(3)
# load all followers
last_element = ''
while last_element != self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')[-1]:
last_element = self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')[-1]
self.driver.execute_script('arguments[0].scrollIntoView(true);', last_element)
time.sleep(1)
# get links to followers
followers_link = [follower.get_attribute('href') for follower in self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')]
for follower_link in followers_link:
# connect to follower profile
self.profile_url = follower_link
instagram_info_obj.connect_to_profile()
# get count of followers
self.get_followers_count()
# write to csv
self.append_to_csv()
def append_to_csv(self):
"""
write profile row and followers count into csv file
"""
with open('instagramInfo.csv', mode='a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=';')
writer.writerow([self.profile_url, self.followers_count])
if __name__ == "__main__":
instagram_info_obj = instaInfo()
instagram_info_obj.login_to_instagram()
instagram_info_obj.connect_to_profile()
instagram_info_obj.get_profile_followers()
instagram_info_obj.driver.quit()
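# --- Hedged sketch: the config.py module imported above (values illustrative) ---
# INSTAGRAM_LOGIN = "your_username"
# INSTAGRAM_PASSWORD = "your_password"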
|
bfesiuk/InstagramInfo
|
info.py
|
info.py
|
py
| 4,293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25127087011
|
#!/usr/bin/env python
import random
from scapy.all import *
hwKali = "00:00:00:00:00:04"
hwGateway = "00:00:00:00:00:03"
hwVictim = "00:00:00:00:00:05"
broadcast = "ff:ff:ff:ff:ff:ff"
ipGateway = "10.10.111.1"
ipVictim = "10.10.111.101"
pG = Ether(src = hwKali, dst = broadcast)\
/ ARP(hwsrc = hwKali, hwdst = hwGateway, psrc = ipVictim, pdst = ipGateway, op = 1)
pV = Ether(src = hwKali, dst = broadcast)\
/ ARP(hwsrc = hwKali, hwdst = hwVictim, psrc = ipGateway, pdst = ipVictim, op = 1)
def main():
    # keep poisoning both ARP caches so the spoofed entries never expire
    while True:
        sendp(pG)
        sendp(pV)

if __name__ == "__main__":
    main()
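# --- Hedged note: for the victim's traffic to keep flowing through the attacker
# during the spoof, IP forwarding usually has to be enabled on the attacker box,
# e.g. on Linux (assumption: Linux host; without it the victim is black-holed):
#   sysctl -w net.ipv4.ip_forward=1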
|
davidkim827/Network-Security
|
arpspoof.py
|
arpspoof.py
|
py
| 574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72650165307
|
def ID():
    IS = int(input("Enter student ID: "))
    US = input("Enter student name: ")
    PS = float(input("Enter student GPA: "))
    return IS, US, PS

def cal(PS):
    if PS < 2:
        NOPE = "Fail"
    else:
        NOPE = "Pass"
    return NOPE

def show(IS, US, PS, NOPE):
    print(f"| Student ID : {IS} | Student name : {US} |")
    print(f"| Student GPA : {PS} | Result : {NOPE} |")

print("***----------------***")
IS, US, PS = ID()
print("***----------------***")
NOPE = cal(PS)
show(IS, US, PS, NOPE)
print("***----------------***")
|
HowToPlayMeow/WorkshopA
|
py13.py
|
py13.py
|
py
| 853 |
python
|
th
|
code
| 0 |
github-code
|
6
|
36568700730
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo import exceptions
from odoo.exceptions import ValidationError
import json
import datetime
import string
import requests
from datetime import date
import logging
_logger = logging.getLogger(__name__)
class hr_report(models.Model):
_name = 'hr_report'
_description = 'HR Reports'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'id DESC'
name = fields.Selection([('Statement Letter','Statement Letter'),('HR Letter','HR Letter')], string="Document", index=True, required=True, tracking=True)
x_employee_id = fields.Many2one('hr.employee', string="Employee", store=True, required=True, tracking=True, index=True)
state = fields.Selection([
('Draft', 'Draft'),('Submit', 'Submit'),('Completed', 'Completed')
], string='Report State' ,default='Draft', index=True, tracking=True)
active = fields.Boolean(string='Active',index=True,default=True)
def submit_report(self):
for _rec in self:
_rec.state = 'Submit'
def print_document(self):
for _rec in self:
#_view_name = ""
_context = ""
_res_model = ""
_target = "new"
_name = ""
if _rec.name == "Statement Letter":
_name = "Statement Letter"
_res_model = 'hr_statement_document'
_context = {
'default_x_employee_id': _rec.x_employee_id.id,
'default_x_hr_report_id': _rec.id,
}
elif _rec.name == "HR Letter":
_name = "HR Letter"
_res_model = 'hr_letter_document'
_context = {
'default_x_employee_id': _rec.x_employee_id.id,
'default_x_hr_report_id': _rec.id,
}
else:
return False
return {
'name': _name,
'view_type': 'form',
'view_mode': 'form',
'res_model': _res_model,
'type': 'ir.actions.act_window',
'target': _target,
'res_id': False,
'context': _context,
}
|
AMohamed389/airport4
|
hr_extend_minds/models/hr_report.py
|
hr_report.py
|
py
| 2,381 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38146173334
|
import unittest
import hashlib
from core import *
class StoneTest(unittest.TestCase):
def test_get_stone_report(self):
r = get_stone_report("20171026")
h = hashlib.sha1(r.encode('utf-8'))
hexrhash = h.hexdigest()
defaulth = "1d6ad2ea634514c7ef6225fd15c332cb52ed45fd"
self.assertEqual(hexrhash,defaulth)
def test_get_gross_amount(self):
report = fromstring(get_stone_report("20171026"))
self.assertEqual(get_gross_amount(report), 2501.0)
def test_get_net_amount(self):
report = fromstring(get_stone_report("20171026"))
self.assertEqual(get_net_amount(report), 2426.3579999999997)
def test_get_prevision(self):
report = fromstring(get_stone_report("20171026"))
        prevision_date = len(dict(get_prevision(report)))
self.assertEqual(prevision_date, 5)
if __name__ == '__main__':
unittest.main()
|
tocvieira/StonePagamentos
|
test_core.py
|
test_core.py
|
py
| 922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74732375868
|
import tensorflow as tf
def conv2d(x, kernel_shape, strides=1, relu=True, padding='SAME'):
W = tf.get_variable("weights", kernel_shape, initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False))
tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)
b = tf.get_variable("biases", kernel_shape[3], initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False))
with tf.name_scope("conv"):
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padding)
x = tf.nn.bias_add(x, b)
tf.summary.histogram("W", W)
tf.summary.histogram("b", b)
if kernel_shape[2] == 3:
x_min = tf.reduce_min(W)
x_max = tf.reduce_max(W)
kernel_0_to_1 = (W - x_min) / (x_max - x_min)
kernel_transposed = tf.transpose(kernel_0_to_1, [3, 0, 1, 2])
tf.summary.image('filters', kernel_transposed, max_outputs=3)
if relu:
x = tf.nn.relu(x)
return x
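# --- Hedged usage sketch (assumes TensorFlow 1.x graph mode; shapes illustrative) ---
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    with tf.variable_scope("conv1"):
        h = conv2d(x, [3, 3, 3, 16])                # 3x3 conv, 3 -> 16 channels, ReLU
    with tf.variable_scope("conv2"):
        h = conv2d(h, [3, 3, 16, 32], strides=2)    # strided conv halves the spatial size
    print(h)  # Tensor of shape (?, 16, 16, 32)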
|
fabiotosi92/CCNN-Tensorflow
|
model/ops.py
|
ops.py
|
py
| 981 |
python
|
en
|
code
| 22 |
github-code
|
6
|
31533542656
|
airline_name = input()
number_tickets_adults = int(input())
number_tickets_kids = int(input())
net_price_ticket_adult = float(input())
tax_service = float(input())
net_price_ticket_kid = net_price_ticket_adult * 0.3
total_price_adult_tickets = (net_price_ticket_adult + tax_service) * number_tickets_adults
total_price_kid_tickets = (net_price_ticket_kid + tax_service) * number_tickets_kids
total_price_tickets = total_price_kid_tickets + total_price_adult_tickets
company_profit = total_price_tickets * 0.2
print(f"The profit of your agency from {airline_name} tickets is {company_profit:.2f} lv.")
|
iliyan-pigeon/Soft-uni-Courses
|
programming_basics_python/exams/exam_2020/agency_profit.py
|
agency_profit.py
|
py
| 601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20968158774
|
import math
from flask import Flask, render_template, request, jsonify
import pickle
import pandas as pd
import numpy as np
import mariadb
import jinja2
conn = mariadb.connect(
user="root",
password="root",
host="localhost",
database="pal_taqdeer")
cur = conn.cursor()
app = Flask(__name__)
@app.route('/')
def index():
query = "select * from student_forecast"
cur.execute(query)
data = list(cur)
return render_template("index.html", data=data)
with open("model.pkl", "rb") as f:
model = pickle.load(f)
@app.route('/result', methods=['POST'])
def result():
age = request.form.get('age')
medu = request.form.get('medu')
fedu = request.form.get('fedu')
failures = request.form.get('failures')
higher = request.form.get('higher')
romantic = request.form.get('romantic')
g1 = request.form.get('g1')
g2 = request.form.get('g2')
gouout = request.form.get('gouout')
if not age or int(age) < 15 or int(age) > 22:
return jsonify({'error' : 'Invalid Age'})
if medu == "":
return jsonify({'error' : 'Invalid Mother Education Status'})
if fedu == "":
return jsonify({'error' : 'Invalid Father Education Status'})
if failures == "" or int(failures) > 4:
return jsonify({'error' : 'Invalid Failures No.'})
if not higher:
return jsonify({'error' : 'Invalid Higher Education Status'})
if not romantic:
return jsonify({'error' : 'Invalid Romantic Status'})
if g1 == "" or int(g1) > 20:
return jsonify({'error' : 'Invalid First Period Grade'})
if g2 == "" or int(g2) > 20:
return jsonify({'error' : 'Invalid Second Period Grade'})
    if gouout == "" or int(gouout) > 5:
return jsonify({'error' : 'Invalid Hang Out Status'})
inputs = [age, medu, fedu, failures,
higher, g1, g2, romantic, gouout]
df = pd.DataFrame([np.array(inputs)], columns=[
'age', 'Medu', 'Fedu', 'failures', 'higher_yes', 'G1', 'G2', 'romantic_no', 'goout'])
finalGrade = model.predict(df)
final_grade_rounded = round(float(finalGrade))
    query = """ INSERT INTO student_forecast (age, medu, fedu, failures, higher, romantic, g1, g2, gouout)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) """
    # a parameterized query avoids SQL injection through the form fields
    cur.execute(query, (age, medu, fedu, failures, higher, romantic, g1, g2, gouout))
    conn.commit()
return jsonify({'grade': final_grade_rounded})
if __name__ == "__main__":
app.run(debug=True)
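# --- Hedged usage sketch: exercising /result with curl (field values are
# illustrative assumptions; the model expects numeric-encoded higher/romantic) ---
# curl -X POST http://127.0.0.1:5000/result \
#   -d "age=17&medu=2&fedu=3&failures=0&higher=1&romantic=1&g1=12&g2=14&gouout=3"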
|
sondosaabed/PalTaqdeer
|
app.py
|
app.py
|
py
| 2,489 |
python
|
en
|
code
| 5 |
github-code
|
6
|
25070989865
|
import logging
from django.urls import path
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import Request
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from purplship.server.core.views.api import APIView
from purplship.server.proxy.router import router
from purplship.server.serializers import SerializerDecorator
from purplship.server.core.gateway import Pickups
from purplship.server.core.serializers import (
PickupCancelRequest,
PickupUpdateRequest,
OperationResponse,
PickupResponse,
PickupRequest,
ErrorResponse,
TestFilters,
MODELS,
)
logger = logging.getLogger(__name__)
ENDPOINT_ID = "@" # This endpoint id is used to make operation ids unique make sure not to duplicate
CARRIER_NAMES = list(MODELS.keys())
class PickupDetails(APIView):
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}schedule_pickup",
operation_summary="Schedule a pickup",
query_serializer=TestFilters(),
request_body=PickupRequest(),
responses={200: PickupResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def post(self, request: Request, carrier_name: str):
"""
        Schedule a pickup for one or many parcels
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupRequest](data=request.data).data
response = Pickups.schedule(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(PickupResponse(response).data, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}update_pickup",
operation_summary="Update a pickup",
query_serializer=TestFilters(),
request_body=PickupUpdateRequest(),
responses={200: PickupResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def put(self, request: Request, carrier_name: str):
"""
Modify a scheduled pickup
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupUpdateRequest](data=request.data).data
response = Pickups.update(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(PickupResponse(response).data, status=status.HTTP_200_OK)
class PickupCancel(APIView):
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}cancel_pickup",
operation_summary="Cancel a pickup",
query_serializer=TestFilters(),
request_body=PickupCancelRequest(),
responses={200: OperationResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def post(self, request: Request, carrier_name: str):
"""
Cancel a pickup previously scheduled
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupCancelRequest](data=request.data).data
response = Pickups.cancel(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(OperationResponse(response).data, status=status.HTTP_200_OK)
router.urls.append(path('proxy/pickups/<carrier_name>', PickupDetails.as_view(), name="pickup-details"))
router.urls.append(path('proxy/pickups/<carrier_name>/cancel', PickupCancel.as_view(), name="pickup-cancel"))
|
danh91/purplship
|
server/modules/proxy/purplship/server/proxy/views/pickup.py
|
pickup.py
|
py
| 3,927 |
python
|
en
|
code
| null |
github-code
|
6
|
6114242445
|
import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from tensorboardX import SummaryWriter
from hebbian_learning.models.equilibrium_propagation_value import Equilibrium_Propagation_Value_Network
from hebbian_learning.models.qt_opt_equil_prop import Qt_Opt_Equil_Prop
from hebbian_learning.models.mlp import MLP
from hebbian_learning.models.qt_opt import Qt_Opt
parser = argparse.ArgumentParser(description='PyTorch RL Example')
parser.add_argument('--equil_prop', type=bool, default=True)
parser.add_argument('--seed', type=int, default=1337)
parser.add_argument('--render', type=bool, default=True)
parser.add_argument('--log-interval', type=int, default=1)
# Equil Prop
parser.add_argument('--energy_learn_rate', type=float, default=0.1)
parser.add_argument('--learning_rate', type=float, default=0.01)
parser.add_argument('--epsilon', type=float, default=0.9)
parser.add_argument('--gamma', type=float, default=0.99)
# parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--target_replace_period', type=int, default=10)
# parser.add_argument('--memory_capacity', type=int, default=256)
parser.add_argument('--num_hidden', type=int, default=64)
parser.add_argument('--n_iterations', type=int, default=1)
parser.add_argument('--n_iterations_neg', type=int, default=1)
parser.add_argument('--beta', type=float, default=0.5)
# MLP
# parser.add_argument('--learning_rate', type=float, default=0.01)
# parser.add_argument('--gamma', type=float, default=0.99)
# parser.add_argument('--epsilon', type=float, default=0.95)
# parser.add_argument('--batch_size', type=int, default=16)
# parser.add_argument('--target_replace_period', type=int, default=10)
# parser.add_argument('--memory_capacity', type=int, default=256)
# parser.add_argument('--num_hidden', type=int, default=64)
args = parser.parse_args()
# args.beta = -np.log(1-args.beta)
# env = gym.make('MountainCar-v0')
env = gym.make('CartPole-v0')
# env = env.unwrapped
env.seed(args.seed)
torch.manual_seed(args.seed)
writer = SummaryWriter()
N_ACTIONS = 1 # env.action_space.n
N_STATES = env.observation_space.shape[0]
def main():
if args.equil_prop:
network = Equilibrium_Propagation_Value_Network(N_STATES + N_ACTIONS, 1, args.num_hidden,
args.energy_learn_rate, args.learning_rate, args.n_iterations,
args.n_iterations_neg, args.beta)
rl_model = Qt_Opt_Equil_Prop(network, N_STATES, N_ACTIONS, args.target_replace_period, args.epsilon, args.gamma)
else:
network = MLP(N_STATES + N_ACTIONS, 1, args.num_hidden, args.learning_rate)
rl_model = Qt_Opt(network, N_STATES, N_ACTIONS, args.memory_capacity,
args.batch_size, args.target_replace_period, args.epsilon, args.gamma)
running_reward = 20
for i_episode in range(100000):
s = env.reset()
ep_r = 0
total_cost = 0
for t in range(100000):
if args.render:
env.render()
a = rl_model.choose_action(s)
s_, r, done, info = env.step(a)
            if not args.equil_prop:
                rl_model.store_transition(s, a, r, done, s_)
                rl_model.learn()
                cost = 0  # this path reports no per-step cost
            else:
                cost = rl_model.learn(s, a, r, done, s_)
            s = s_
            ep_r += r
            total_cost += cost
if done:
writer.add_scalar('data/episode_reward', t, i_episode)
writer.add_scalar('data/average_cost', total_cost / t, i_episode)
running_reward = running_reward * 0.99 + ep_r * 0.01
print('Episode {}\treward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, ep_r, running_reward))
break
env.close()
if __name__ == '__main__':
main()
|
ltecot/emergence_properties
|
hebbian_learning/envs/cartpole.py
|
cartpole.py
|
py
| 4,095 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25785453821
|
'''
Plot the DPEI/DYCT quadrant matrix: a scatter of provinces split into four
groups at the 3.33 thresholds, with per-province annotations.
'''
import xlrd
import matplotlib.pyplot as plt
from config import *
from model import Dmu
# plt.rcParams['text.usetex']=True
# # Place the command in the text.latex.preamble using rcParams
# plt.rcParams['text.latex.preamble']=r'\makeatletter \newcommand*{\rom}[1]{\bfseries\expandafter\@slowromancap\romannumeral #1@} \makeatother'
plt.xkcd()
def split(func):
def _wrapper(*args, **kw):
dmus = func(*args, **kw)
return ([item for item in dmus if item.dpei <= 3.33 and item.dyct <= 3.33],
[item for item in dmus if item.dpei > 3.33 and item.dyct <= 3.33],
[item for item in dmus if item.dpei > 3.33 and item.dyct > 3.33],
[item for item in dmus if item.dpei <= 3.33 and item.dyct > 3.33])
return _wrapper
@split
def read_data():
workbook = xlrd.open_workbook(XLS_FILE_NAME)
sheet = workbook.sheets()[SHEET_NO]
dmus = []
for row_index in range(ROW_FROM, ROW_TO+1):
row = sheet.row_values(row_index)[COLUMN_FROM:COLUMN_TO+1]
dmus.append(Dmu(row[0], float(row[1]), float(row[2])))
return dmus
def _write_dmus(dmus, color=None, marker=None):
name = [item.name for item in dmus]
dpeis = [item.dpei for item in dmus]
dpcts = [item.dyct for item in dmus]
plt.scatter(dpeis,dpcts, c=color, marker=marker,zorder=2)
def _generate_dict(dmus):
dmu_dict = {}
for dmu in dmus:
dmu_dict[dmu.name]=[dmu.dpei, dmu.dyct]
return dmu_dict
def draw():
dmus_left_bottom, dmus_right_bottom, dmus_right_top, dmus_left_top = read_data()
# draw point
_write_dmus(dmus_left_bottom, '#76EE00','^')
_write_dmus(dmus_right_bottom, '#EE4000','2')
_write_dmus(dmus_right_top, '#4F94CD','o')
_write_dmus(dmus_left_top, '#DAA520','s')
#decoration
plt.xlim(-0.5, 10.5)
plt.ylim(-0.5, 10.5)
plt.plot([-0.5, 10], [3.33,3.33], '--', c='k', linewidth=0.8)
plt.plot([3.33,3.33],[0, 10.5], '--', c='k', linewidth=0.8)
plt.text(3.33, -0.25, s='3.33', ha='center', va='center', fontsize=9)
plt.text(10.3, 3.33, s='3.33', ha='center', va='center', rotation=90,fontsize=9)
# annotations
for dmu in dmus_right_top:
plt.text(dmu.dpei+0.3, dmu.dyct+0.32, s=dmu.name, ha='center', va='center',fontsize=9)
for dmu in dmus_left_top:
plt.text(dmu.dpei-0.3, dmu.dyct+0.32, s=dmu.name, ha='center', va='center', fontsize=9)
#draw right_bottom
dmu_dict = _generate_dict(dmus_right_bottom)
plt.text(dmu_dict['Hunan'][0]+0.3,dmu_dict['Hunan'][1]-0.32, s='Hunan',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Guizhou'][0]+0.3,dmu_dict['Guizhou'][1]-0.32, s='Guizhou',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Jilin'][0]+0.3,dmu_dict['Jilin'][1]-0.3, s='Jilin',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Anhui'][0]+0.5,dmu_dict['Anhui'][1]-0.05, s='Anhui',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Guangxi'][0]+0.2,dmu_dict['Guangxi'][1]-0.32, s='Guangxi',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Zhejiang'][0]+0.2,dmu_dict['Zhejiang'][1]-0.05, s='Zhejiang',
ha='left', va='center', fontsize=9)
# draw left bottom
dmu_dict.clear()
dmu_dict = _generate_dict(dmus_left_bottom)
plt.text(dmu_dict['Hainan'][0]+0.4,dmu_dict['Hainan'][1]-0.35, s='Hainan',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Beijing'][0]+0.4,dmu_dict['Beijing'][1]-0.35, s='Beijing',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Qinghai'][0]-0.1, dmu_dict['Qinghai'][1]-0.2, s='Qinghai',
ha='right', va='center', fontsize=9)
plt.text(dmu_dict['Shanghai'][0]-0.1, dmu_dict['Shanghai'][1]-0.5, s='Shanghai',
ha='center', va='center', fontsize=9)
plt.plot([dmu_dict['Shanghai'][0],dmu_dict['Shanghai'][0]-0.1],
[dmu_dict['Shanghai'][1], dmu_dict['Shanghai'][1]-0.4],
'-',
c='k',linewidth=0.5,zorder=1)
plt.text(dmu_dict['Xinjiang'][0]-0.1, dmu_dict['Xinjiang'][1]+0.4, s='Xinjiang',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Ningxia'][0]-0.1, dmu_dict['Ningxia'][1]+0.35, s='Ningxia',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Tianjin'][0]+.4, dmu_dict['Tianjin'][1]+0.25, s='Tianjin',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Gansu'][0]+0.45, dmu_dict['Gansu'][1]+0.1, s='Gansu',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Shannxi'][0]+0.1, dmu_dict['Shannxi'][1]+0.4, s='Shannxi',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Fujian'][0]+0.3, dmu_dict['Fujian'][1]+0.5, s='Fujian',
ha='left', va='center', fontsize=9)
plt.plot([dmu_dict['Fujian'][0], dmu_dict['Fujian'][0]+0.5],
[dmu_dict['Fujian'][1], dmu_dict['Fujian'][1]+0.4],
'-',
c='k',linewidth=0.5,zorder=1)
plt.text(dmu_dict['Yunnan'][0], dmu_dict['Yunnan'][1]+0.15, s='Yunnan',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Chongqing'][0]-0.2, dmu_dict['Chongqing'][1]-0.4, s='Chongqing',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Heilongjiang'][0]+0.1, dmu_dict['Heilongjiang'][1]-0.7, s='Heilongjiang',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Jiangxi'][0]-0.1, dmu_dict['Jiangxi'][1]-1, s='Jiangxi',
ha='left', va='center', fontsize=9)
plt.plot([dmu_dict['Heilongjiang'][0], dmu_dict['Heilongjiang'][0]+0.4],
[dmu_dict['Heilongjiang'][1], dmu_dict['Heilongjiang'][1]-0.55],
'-',c='k',linewidth=0.5, zorder=1)
plt.plot([dmu_dict['Jiangxi'][0], dmu_dict['Jiangxi'][0]+0.2],
[dmu_dict['Jiangxi'][1], dmu_dict['Jiangxi'][1]-0.9],
'-',c='k',linewidth=0.5, zorder=1)
plt.show()
if __name__ == '__main__':
draw()
|
gaufung/CodeBase
|
PDA/matrix/app.py
|
app.py
|
py
| 6,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11437321996
|
class student:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def get_age(self):
        return self.age

def main():
    student1 = student("sham", 20)
    student2 = student("andy", 15)
    print(student1.age)
    del student1.age        # removes the instance attribute 'age'
    # print(student1.age)   # would now raise AttributeError
    print(student1.name)
    del student1.name       # removes the instance attribute 'name'

if __name__=="__main__":
    main()
|
sun9085/python
|
delete_object.py
|
delete_object.py
|
py
| 387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12938444153
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from jeopardy.views import (
QuestionList,
QuestionById,
QuestionRandom,
PlayerList,
PlayerByName,
PlayerById,
PlayerQuestionById,
PlayerQuestionByName,
)
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^questions/$', QuestionList.as_view()),
url(r'^questions/id/(?P<pk>[0-9]+)/$', QuestionById.as_view()),
url(r'^questions/random/$', QuestionRandom.as_view()),
url(r'^players/$', PlayerList.as_view()),
url(r'^players/name/(?P<name>.+)/$', PlayerByName.as_view()),
url(r'^players/id/(?P<pk>[0-9]+)/$', PlayerById.as_view()),
url(r'^players/name/(?P<name>.+)/question/(?P<question_id>[0-9]+)/$', PlayerQuestionByName.as_view()),
url(r'^players/id/(?P<player_id>[0-9]+)/question/(?P<question_id>[0-9]+)/$', PlayerQuestionById.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
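# format_suffix_patterns makes every route above also accept an optional
# format suffix, e.g. GET /questions/random.json alongside
# /questions/random/; DRF then selects the renderer from the suffix.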
|
ryanwholey/jeopardy_bot
|
trabek_bot/jeopardy/urls.py
|
urls.py
|
py
| 982 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26013197336
|
from .const import (
PLATFORM,
PRESET_MODE_HOLIDAY,
PRESET_MODE_MANUAL,
PRESET_MODE_SCHEDULE_1,
PRESET_MODE_SCHEDULE_2,
PRESET_MODE_SCHEDULE_3,
PRESET_MODE_TEMP_OVERRIDE,
PRESET_MODE_ANTIFROST,
BAXI_PRESET_MANUAL,
BAXI_PRESET_SCHEDULE,
)
from homeassistant.components.climate.const import (
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
)
import datetime
from datetime import timedelta
def preset_mode_baxi_to_ha(baxi_mode, program=None):
if baxi_mode == "manual":
return PRESET_MODE_MANUAL
elif baxi_mode == "temporary-override":
return PRESET_MODE_TEMP_OVERRIDE
elif baxi_mode == "anti-frost":
return PRESET_MODE_ANTIFROST
elif baxi_mode == "schedule" and program == 1:
return PRESET_MODE_SCHEDULE_1
elif baxi_mode == "schedule" and program == 2:
return PRESET_MODE_SCHEDULE_2
elif baxi_mode == "schedule" and program == 3:
return PRESET_MODE_SCHEDULE_3
elif baxi_mode == "holiday":
return PRESET_MODE_HOLIDAY
def preset_mode_ha_to_baxi(ha_mode):
if ha_mode == PRESET_MODE_MANUAL:
return BAXI_PRESET_MANUAL, "manual"
elif ha_mode == PRESET_MODE_SCHEDULE_1:
return BAXI_PRESET_SCHEDULE, "1"
elif ha_mode == PRESET_MODE_SCHEDULE_2:
return BAXI_PRESET_SCHEDULE, "2"
elif ha_mode == PRESET_MODE_SCHEDULE_3:
return BAXI_PRESET_SCHEDULE, "3"
def hvac_mode_baxi_to_ha(raw_mode):
if raw_mode == "off":
return HVAC_MODE_OFF
elif raw_mode == "heating-auto":
return HVAC_MODE_AUTO
def hvac_mode_ha_to_baxi(ha_mode):
if ha_mode == HVAC_MODE_AUTO:
return "heating-auto"
elif ha_mode == HVAC_MODE_OFF:
return "off"
def create_override_date(target_time, days_offset):
now = datetime.datetime.now()
override_date = now + timedelta(days=days_offset)
target_hour = int(target_time.split(":")[0])
target_minutes = int(target_time.split(":")[1])
override_date = override_date.replace(
hour=target_hour, minute=target_minutes, second=0, microsecond=0
)
return override_date.isoformat("T", "minutes")
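# Usage sketch for create_override_date (standard library only): with
# target_time "21:30" and days_offset 1 the result is tomorrow at 21:30
# local time, ISO-formatted to minute precision:
#
#     create_override_date("21:30", 1)  # -> e.g. "2024-06-08T21:30"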
|
vipial1/BAXI_thermostat
|
custom_components/baxi_thermostat/helper.py
|
helper.py
|
py
| 2,194 |
python
|
en
|
code
| 9 |
github-code
|
6
|
15521328462
|
blocks = set()
with open("input", "r") as file:
for line in iter(file.readline, ''):
blocks.add(tuple(map(int, line.rstrip().split(','))))
covered = 0
for block in blocks:
for dir in [1, -1]:
if (block[0] + dir, block[1], block[2]) in blocks:
covered += 1
if (block[0], block[1] + dir, block[2]) in blocks:
covered += 1
if (block[0], block[1], block[2] + dir) in blocks:
covered += 1
max_x = max(blocks, key=lambda x: x[0])[0]
max_y = max(blocks, key=lambda x: x[1])[1]
max_z = max(blocks, key=lambda x: x[2])[2]
internal = 0
for x in range(max_x+1):
for y in range(max_y+1):
for z in range(max_z+1):
int_cov = 0
block = (x, y, z)
for dir in [1, -1]:
if (block[0] + dir, block[1], block[2]) in blocks:
int_cov += 1
if (block[0], block[1] + dir, block[2]) in blocks:
int_cov += 1
if (block[0], block[1], block[2] + dir) in blocks:
int_cov += 1
            if int_cov == 6 and block not in blocks:  # a 1x1x1 air pocket
internal += 1
print(covered)
sa = 6*len(blocks) - covered
print(sa)
print(sa - (6*internal))
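# Note: `internal` above only detects 1x1x1 air pockets (cells whose six
# neighbours are all lava), so the last line undercounts enclosed area when
# pockets span multiple cells. A general approach flood-fills the exterior
# air and counts the lava faces it touches; a minimal sketch, assuming
# `blocks` as built above:
#
#     from collections import deque
#
#     def exterior_surface(blocks):
#         lo = min(min(b) for b in blocks) - 1
#         hi = max(max(b) for b in blocks) + 1
#         seen, q, area = {(lo, lo, lo)}, deque([(lo, lo, lo)]), 0
#         while q:
#             x, y, z = q.popleft()
#             for dx, dy, dz in ((1, 0, 0), (-1, 0, 0), (0, 1, 0),
#                                (0, -1, 0), (0, 0, 1), (0, 0, -1)):
#                 n = (x + dx, y + dy, z + dz)
#                 if not all(lo <= c <= hi for c in n) or n in seen:
#                     continue
#                 if n in blocks:
#                     area += 1  # one exterior face seen from the air side
#                 else:
#                     seen.add(n)
#                     q.append(n)
#         return area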
|
probablyanasian/advent-of-code
|
2022/18/b.py
|
b.py
|
py
| 1,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37971441649
|
'''
Log optional sensors to CouchDB
Called from /home/pi/MVP/scripts/LogMVP.sh - needs to be uncommented there for this to run
Uncomment desired sensor functions in log()
Author: Howard Webb
Date: 5/3/2019
'''
from LogUtil import Logger
from Persistence import Persistence
class LogSensorsExtra(object):
def __init__(self, lvl=Logger.INFO):
"""Record optional sensor data
Args:
lvl: Logging level
Returns:
None
Raises:
None
"""
self._logger = Logger("LogSensor-Extra", lvl, file="/home/pi/MVP/logs/obsv.log")
self._activity_type = "Environment_Observation"
self._test=False
self._persist = Persistence(self._logger)
def getOneWire(self, test=False):
"""Loop OneWire temperature sensors
Assumes there are four
Args:
test: flag for testing
Returns:
None
Raises:
None
"""
self._logger.debug("In getOneWire")
from OneWireTemp import OneWireTemp
for sensor in OneWireTemp.one_temp:
self.logOneWire(sensor, OneWireTemp.one_temp[sensor])
def logOneWire(self, sensor, name, test=False):
"""Record OneWire temperature sensor
Args:
sensor: number of the sensor
name: name of the sensor
test: flag for testing
Returns:
None
Raises:
None
"""
self._logger.debug("In logOneWire")
from OneWireTemp import OneWireTemp
try:
ow=OneWireTemp()
temp = ow.getTempC(sensor)
status_qualifier = 'Success'
if self._test:
status_qualifier = 'Test'
rec = [self._activity_type, '', name, 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'DS18B20-' + str(sensor), status_qualifier,'']
self._persist.save(rec)
self._logger.info("{}, {}, {:10.1f}".format(name, status_qualifier, temp))
        except Exception as e:
            status_qualifier = 'Failure'
            if self._test:
                status_qualifier = 'Test'
            rec = [self._activity_type, '', name, 'Air', 'Temperature', '', 'Centigrade', 'DS18B20-' + str(sensor), status_qualifier, str(e)]
            self._persist.save(rec)
            self._logger.error("{}, {}, {}".format(name, status_qualifier, e))
def getLux(self, test=False):
"""Record LUX sensor (TSL2561)
Args:
test: flag for testing
Returns:
None
Raises:
None
"""
from TSL2561 import TSL2561
lx = TSL2561()
self._logger.info("TSL2561 - LUX")
try:
lux = lx.getLux()
status_qualifier = 'Success'
if test:
status_qualifier = 'Test'
rec = [self._activity_type, '', 'Canopy', 'Light', 'LUX', "{:3.1f}".format(lux), 'lux', 'TSL2561', status_qualifier,'']
self._persist.save(rec)
self._logger.info("{}, {}, {:10.1f}".format("LUX", status_qualifier, lux))
except Exception as e:
status_qualifier = 'Failure'
if test:
status_qualifier = 'Test'
rec = [self._activity_type, '', 'Canopy', 'Light', 'LUX', '', 'lux', 'TSL2561', status_qualifier,str(e)]
self._persist.save(rec)
self._logger.error("{}, {}, {}".format("LUX", status_qualifier, e))
def getEC(self, test=False):
"""Record EC sensor (EC - ADC reading)
Args:
test: flag for testing
Returns:
None
Raises:
None
"""
from EC import EC
self._logger.info("EC")
try:
s = EC()
ec = s.getEC()
status_qualifier = 'Success'
if test:
status_qualifier = 'Test'
print("{}, {}, {:10.1f}".format("EC", status_qualifier, ec))
rec = [self._activity_type, '', 'Reservoir', 'Nutrient', 'EC', "{:3.1f}".format(ec), 'EC', 'EC', status_qualifier,'']
self._persist.save(rec)
self._logger.info("{}, {}, {:10.1f}".format("EC", status_qualifier, ec))
        except Exception as e:
            status_qualifier = 'Failure'
            if test:
                status_qualifier = 'Test'
            rec = [self._activity_type, '', 'Reservoir', 'Nutrient', 'EC', '', 'EC', 'EC', status_qualifier, str(e)]
            self._persist.save(rec)
            self._logger.error("{}, {}, {}".format("EC", status_qualifier, e))
def getCO2_NDIR(self, test=False):
"""Record CO2 sensor (NDIR)
Args:
test: flag for testing
Returns:
None
Raises:
None
"""
from NDIR import Sensor
self._logger.info("CO2 - NDIR")
try:
sensor = Sensor()
sensor.begin()
co2=sensor.getCO2()
status_qualifier = 'Success'
if test:
status_qualifier = 'Test'
print("{}, {}, {:10.1f}".format("CO2 Canopy", status_qualifier, co2))
rec = [self._activity_type, '', 'Canopy', 'Air', 'CO2', "{:3.1f}".format(co2), 'ppm', 'MH-Z16-NDIR', status_qualifier,'']
self._persist.save(rec)
self._logger.debug("{}, {}, {:10.1f}".format("CO2", status_qualifier, co2))
        except Exception as e:
            status_qualifier = 'Failure'
            if test:
                status_qualifier = 'Test'
            rec = [self._activity_type, '', 'Canopy', 'Air', 'CO2', '', 'ppm', 'MH-Z16-NDIR', status_qualifier, str(e)]
            self._persist.save(rec)
            self._logger.error("{}, {}, {}".format("CO2 NDIR", status_qualifier, e))
def getCO2_CCS811(self, test=False):
"""Record CO2 sensor (CCS811)
Args:
test: flag for testing
Returns:
None
Raises:
None
"""
from CCS811 import CCS811
self._logger.info("CO2 CCS811")
        try:
            # SLAVE is assumed to be the sensor's I2C address constant provided
            # by the CCS811 module; it is not defined in this file as written.
            sensor = CCS811(SLAVE)
co2 = sensor.get_co2()
status_qualifier = 'Success'
if test:
status_qualifier = 'Test'
print("{}, {}, {:10.1f}".format("CO2 Canopy", status_qualifier, co2))
            rec = [self._activity_type, '', 'Canopy', 'Air', 'CO2', "{:3.1f}".format(co2), 'ppm', 'CCS811', status_qualifier, '']
self._persist.save(rec)
self._logger.debug("{}, {}, {:10.1f}".format("CCS811 - CO2", status_qualifier, co2))
        except Exception as e:
            status_qualifier = 'Failure'
            if test:
                status_qualifier = 'Test'
            rec = [self._activity_type, '', 'Canopy', 'Air', 'CO2', '', 'ppm', 'CCS811', status_qualifier, str(e)]
            self._persist.save(rec)
            self._logger.error("{}, {}, {}".format("CO2 CCS811", status_qualifier, e))
def getSCD(self):
"""Record CO2 sensor (scd30)
Generates co2, temperature and relative humidity
Args:
None
Returns:
None
Raises:
None
"""
from scd30 import SCD30
self._scd = SCD30(self._logger)
self._logger.debug("In SCD30")
try:
co2, temp, rh = self._scd.get_data()
status = 'Success'
if self._test:
status = 'Test'
c_rec = ['Environment_Observation', '', 'Top', 'Air', 'CO2', "{:10.1f}".format(co2), 'ppm', 'scd30', status, '']
t_rec = ['Environment_Observation', '', 'Top', 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'scd30', status, '']
h_rec = ['Environment_Observation', '', 'Top', 'Air', 'Humidity', "{:10.1f}".format(rh), 'Percent', 'scd30', status, '']
self._persist.save(c_rec)
self._persist.save(t_rec)
self._persist.save(h_rec)
self._logger.info("{} {:6.1f}, {} {:3.1f}, {} {:3.1f}".format("EnvObsv-CO2:", co2, "Temp", temp, "Humidity:", rh))
except Exception as e:
status = 'Failure'
if self._test:
status = 'Test'
c_rec = ['Environment_Observation', '', 'Top', 'Air', 'CO2', '', 'ppm', 'scd30', status, str(e)]
            t_rec = ['Environment_Observation', '', 'Top', 'Air', 'Temperature', '', 'Centigrade', 'scd30', status, '']
h_rec = ['Environment_Observation', '', 'Top', 'Air', 'Humidity', '', 'Percent', 'scd30', status, '']
self._persist.save(c_rec)
self._persist.save(t_rec)
self._persist.save(h_rec)
            self._logger.error("{} {}".format("EnvObsv-SCD30 Error:", e))
def log(self):
'''Log extra sensors
Uncomment desired sensors
Imports are in the function to avoid loading unnecessary code
'''
#self.getOneWire()
self.getLux()
self.getEC()
self.getCO2_NDIR()
#self.getCO2_CCS811()
self.getSCD()
def test():
'''
Use for debugging, outputs detail data
'''
print("Testing SDC30")
ls = LogSensorsExtra()
ls._logger.setLevel(Logger.DEBUG)
ls._test = True
ls.log()
def validate():
'''
Exercise the function to make sure it is working correctly
Logs valid data
'''
print("Validate SDC30")
main(Logger.INFO)
def main(level=Logger.INFO):
'''
Function that should get called from scripts
'''
ls = LogSensorsExtra()
ls._logger.setLevel(level)
ls.log()
if __name__=="__main__":
main()
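# Record layout passed to Persistence.save throughout this module (inferred
# from the calls above):
#     [activity_type, '', location, medium, attribute, value, unit,
#      sensor_id, status_qualifier, error_text]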
|
webbhm/NerdFarm
|
MVP/python/LogSensorsExtra.py
|
LogSensorsExtra.py
|
py
| 10,776 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23713666857
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 22:16:45 2023
@author: EMILIANO
"""
import openpyxl
import pandas as pd
# Workbook starts with a capital letter (it is a class)
from openpyxl import Workbook
Excelworkbook = openpyxl.load_workbook(r"H:\Documentos\Practica Pyhton Bond Arg\Dataset bonos arg usd.xlsx")
Excelsheet = Excelworkbook.active
# get the column headers
titulos = next(Excelsheet.values)[0:]
bondarg = pd.DataFrame(Excelsheet.values, columns=titulos)
bondarg = bondarg.drop(bondarg.index[0])
# create the price-difference columns AE38-AL30 and AL41-AL30
bondarg["Diferencia Precio 38-30"] = bondarg[" AE38D "] - bondarg[" AL30D "]
bondarg["Diferencia Precio 41-30"] = bondarg[" AL41D "] - bondarg[" AL30D "]
media_38_30 = bondarg["Diferencia Precio 38-30"].mean()
media_41_30 = bondarg["Diferencia Precio 41-30"].mean()
# # Workbook starts with a capital letter
# nuevoexcel = Workbook()
# nuevoexcel_sheet = nuevoexcel.active
# nuevoexcel = bondarg
# nuevoexcel.save("datasetarg.xlsx")
# how to navigate columns with iloc using coordinates
bono38 = bondarg.iloc[0:, 5]
# copying a "DataFrame" and pasting it elsewhere (copy 2)
# copia2
# bondarg2 = bondarg
# # overwrite the original with just the one column
# bondarg = bondarg[" AE38D "]
bondarg_head10 = bondarg[["Date", " AE38D "]].head(10)
bondarg_tail10 = bondarg[["Date", " AE38D "]].tail(10)
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
bondarg_combinado = pd.concat([bondarg_head10, bondarg_tail10])
bondarg_combinado2 = pd.concat([bondarg_head10, bondarg_tail10, bondarg_head10])
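# pd.concat keeps each frame's original row labels; pass ignore_index=True to
# renumber the combined frame:
#     pd.concat([bondarg_head10, bondarg_tail10], ignore_index=True)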
|
emilapuente1/Practica-Pyhton-Bond-Arg
|
Bondarg.py
|
Bondarg.py
|
py
| 1,510 |
python
|
es
|
code
| 0 |
github-code
|
6
|
41849719603
|
from Engine import is_area_in_board
class Knight:
def __init__(self, color, pos):
self.pos = pos
self.color = color
self.val = 30 * self.color
self.image_id = self.create_image_id()
def create_image_id(self):
return 13 + self.color
def move(self, board):
moves = []
correct_moves = []
        directions = [2, 1, 2, -1, -2, 1, -2, -1, 1, 2, 1, -2, -1, 2, -1, -2]  # flattened (dx, dy) pairs for the eight knight moves
for i in range(0, len(directions), 2):
moves.append((self.pos[0] + directions[i], self.pos[1] + directions[i + 1]))
for area in moves:
if is_area_in_board(area):
if board[area[0]][area[1]].checker is None:
correct_moves.append(area)
                elif board[area[0]][area[1]].checker.color != self.color:
                    # an enemy piece can be captured unless it is the king (val == ±900)
                    if board[area[0]][area[1]].checker.val != 900 * self.color * -1:
                        correct_moves.append(area)
return correct_moves
def king_attack(self, board):
moves = []
directions = [2, 1, 2, -1, -2, 1, -2, -1, 1, 2, 1, -2, -1, 2, -1, -2]
for i in range(0, len(directions), 2):
moves.append((self.pos[0] + directions[i], self.pos[1] + directions[i + 1]))
for area in moves:
if is_area_in_board(area):
if board[area[0]][area[1]].checker is not None and board[area[0]][area[1]].checker.color != self.color:
if board[area[0]][area[1]].checker.val == 900 * self.color * -1:
return True
return False
def copy(self):
return Knight(self.color, self.pos)
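# Usage sketch (hypothetical minimal board: an 8x8 grid of squares, each with
# a `checker` slot, matching what move() indexes; Engine.is_area_in_board is
# assumed to bounds-check the (row, col) tuple):
#
#     class Square:
#         def __init__(self):
#             self.checker = None
#
#     board = [[Square() for _ in range(8)] for _ in range(8)]
#     knight = Knight(1, (4, 4))
#     print(knight.move(board))  # the eight knight moves from the centre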
|
MaciejKrol51/chess
|
Knight.py
|
Knight.py
|
py
| 1,692 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3759720297
|
from geometry import *
from ctypes import *
class TriangleMesh(Structure):
_fields_ = [('vNum',c_uint),('v',POINTER(Vector)),
('viNum',c_uint),('vi',POINTER(c_uint)),
('vnNum',c_uint),('vn',POINTER(Vector)),
('vni',POINTER(c_uint))]
def __init__(self, v = tuple(), vi = tuple(), vn = tuple(), vni = tuple()):
self.vNum = len(v)
self.viNum = len(vi)
self.vnNum = len(vn)
vniNum = len(vni)
if self.viNum % 3 or vniNum % 3 :
            raise ValueError("ERROR in TriangleMesh: not enough elements!")
self.v = (Vector * self.vNum)(*v)
self.vi = (c_uint * self.viNum)(*vi)
if self.vnNum:
self.vn = (Vector * self.vnNum)(*vn)
if not vniNum:
self.vni = (c_uint * self.viNum)(*vi)
else:
self.vni = (c_uint * vniNum)(*vni)
else:
if not vniNum:
self.vn = get_normals(self.vNum, self.v, self.viNum, self.vi)
self.vni = (c_uint * self.viNum)(*vi)
            else:
                # normal indices supplied without accompanying normals are ignored
                pass
def get_normals(vNum, vertices, viNum, indices):
    """Area-weighted vertex normals: accumulate the cross product of the two
    edges leaving each triangle corner, then normalize per vertex."""
    res = (Vector * vNum)()
    for j in range(0, viNum, 3):
        for i in range(3):
            dirx = vertices[indices[j + (i + 1) % 3]] - vertices[indices[j + i]]
            dirz = vertices[indices[j + (i + 3 - 1) % 3]] - vertices[indices[j + i]]
            diry = dirx.cross(dirz)
            res[indices[j + i]] += diry
    for j in range(vNum):
        res[j].normalize()
    return res
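# Usage sketch, assuming `Vector` from `geometry` is a ctypes Structure that
# supports subtraction, +=, cross() and normalize() (hypothetical values):
#
#     mesh = TriangleMesh(v=(Vector(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0)),
#                         vi=(0, 1, 2))
#     # with no vn/vni supplied, get_normals derives one normal per vertex;
#     # for this counter-clockwise triangle they all point along +Z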
|
millag/Loop-subdivision
|
SubdivisionSurfaces/shapes.py
|
shapes.py
|
py
| 1,661 |
python
|
en
|
code
| 6 |
github-code
|
6
|
16484762613
|
import pygame
import time
import random
pygame.init()
screensize = (200, 200)  # (width, height) tuple for the display surface
screen = pygame.display.set_mode(screensize)
run = True
color = (250, 153, 0)
displacement = 0
x_pos = 200
x_pos_2 = 300
y_pos = 95
pipeno = 0
pipeno2 = 0
gamepipes = 10
loclist = []
for a in range(gamepipes):
loclist.append(random.randint(-20,40))
loclist2 = []
for b in range(gamepipes):
loclist2.append(random.randint(-20,40))
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False  # quit after the loop; calling pygame.quit() here would break the next frame
screen.fill(color)
pygame.draw.rect(screen, (255,255,255), pygame.Rect(95, y_pos, 20, 20))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos, loclist[pipeno], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos, 120 + loclist[pipeno], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos_2, loclist2[pipeno2], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos_2, 120 + loclist2[pipeno2], 20, 60))
displacement = 3
x_pos -= displacement
x_pos_2 -= displacement
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
y_pos -= 8
y_pos += 2
    if x_pos <= 0:
        x_pos = 200
        pipeno += 1  # note: indexes loclist, which only holds `gamepipes` entries
    if x_pos_2 <= 0:
        x_pos_2 = 200
        pipeno2 += 1
if y_pos >= 200:
run = False
print("game over!")
rect1 = pygame.Rect(95, y_pos, 20, 20)
rect2 = pygame.Rect(x_pos, loclist[pipeno], 20, 60)
rect3 = pygame.Rect(x_pos, 120 + loclist[pipeno], 20, 60)
rect4 = pygame.Rect(x_pos_2, loclist2[pipeno2], 20, 60)
rect5 = pygame.Rect(x_pos_2, 120 + loclist2[pipeno2], 20, 60)
collideTest1 = rect1.colliderect(rect2)
collideTest2 = rect1.colliderect(rect3)
collideTest3 = rect1.colliderect(rect4)
collideTest4 = rect1.colliderect(rect5)
if collideTest1 == 1 or collideTest2 == 1 or collideTest3 == 1 or collideTest4 == 1:
print("game over")
run = False
time.sleep(0.033)
pygame.display.update()
print(f"Total Pipes: {pipeno + pipeno2}")
|
RinUnderscore/LSCC-Pygame-Lesson
|
main.py
|
main.py
|
py
| 1,965 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17646581677
|
import boto3
from flask import Flask, request
import json
import os
app = Flask(__name__)
REGION = 'eu-north-1'
TESTING_URL = 'http://localhost:4566' # os.environ['LOCAL_TESTING']
TOPIC_ARN = 'arn:aws:sns:eu-north-1:000000000000:techtalk-sns'
@app.route('/')
def demo_homepage():
return "Welcome to Anusha`s LocalStack Demo."
@app.route('/send', methods=['POST'])
def send_messages():
session = boto3.session.Session(region_name=REGION)
message = (request.data).decode('ascii')
sns = session.client('sns', endpoint_url=TESTING_URL)
response = sns.publish(
TopicArn=TOPIC_ARN,
Subject='Hello',
Message=message
)
return {'response': 'message sent successfully'}
@app.route('/receive')
def receive_messages():
session = boto3.session.Session(region_name=REGION)
    result = 'No Message Received'  # default
sqs = session.client('sqs', endpoint_url=TESTING_URL)
response = sqs.receive_message(
QueueUrl='http://localhost:4566/000000000000/techtalk'
)
msgs = response.get('Messages')
if msgs:
result = [(json.loads(msg.get('Body'))).get('Message') for msg in msgs if msg.get('Body')]
handles = [{'Id': msg.get('MessageId'), 'ReceiptHandle': msg.get('ReceiptHandle')} for msg in msgs]
sqs.delete_message_batch(
QueueUrl='http://localhost:4566/000000000000/techtalk',
Entries=handles
)
return {'Result': result}
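# Local smoke-test sketch (assumes LocalStack on :4566 with the topic and
# queue already created and the topic subscribed to the queue):
#     curl -X POST --data 'hello' http://localhost:5000/send
#     curl http://localhost:5000/receive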
|
anushacassum/mylocalstack
|
app.py
|
app.py
|
py
| 1,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19352162843
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = list(input().split(' '))
row= int(n[0])
col = int(n[1])
spl_chr = '.|.'
spl_chr_num = 1
dash = '-'
for r in range(1,(int((row-1)/2)+1)):
dash_num = int(col-(spl_chr_num*3))
print((dash)*int((dash_num)/2),end='')
for spl in range(spl_chr_num):
print(spl_chr,end='')
spl_chr_num+=2
print((dash)*int((dash_num)/2))
print((dash)* int((col-7)/2),end='')
print('WELCOME',end='')
print((dash)* int((col-7)/2))
spl_chr_num-=2
for r in range(1,(int((row-1)/2)+1)):
dash_num = int(col-(spl_chr_num*3))
print((dash)*int((dash_num)/2),end='')
for spl in range(spl_chr_num):
print(spl_chr,end='')
spl_chr_num-=2
print((dash)*int((dash_num)/2))
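# An equivalent, more compact sketch using str.center (same output for the
# same N and M):
#
#     n, m = map(int, input().split())
#     for i in range(1, n, 2):
#         print(('.|.' * i).center(m, '-'))
#     print('WELCOME'.center(m, '-'))
#     for i in range(n - 2, 0, -2):
#         print(('.|.' * i).center(m, '-'))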
|
SomanshuMishra/HackerRank
|
Designer_Door_Mat.py
|
Designer_Door_Mat.py
|
py
| 785 |
python
|
en
|
code
| 0 |
github-code
|
6
|