hexsha (stringlengths 40–40) | size (int64 6–782k) | ext (stringclasses 7 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–237) | max_stars_repo_name (stringlengths 6–72) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (list) | max_stars_count (int64 1–53k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24, nullable) | max_issues_repo_path (stringlengths 4–184) | max_issues_repo_name (stringlengths 6–72) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (list) | max_issues_count (int64 1–27.1k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24, nullable) | max_forks_repo_path (stringlengths 4–184) | max_forks_repo_name (stringlengths 6–72) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (list) | max_forks_count (int64 1–12.2k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24, nullable) | content (stringlengths 6–782k) | avg_line_length (float64 2.75–664k) | max_line_length (int64 5–782k) | alphanum_fraction (float64 0–1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edcef98b9dacc4ad7161a18913e7407f99c623b2
| 410 |
py
|
Python
|
yougetguide/com/aaron/youtu.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
yougetguide/com/aaron/youtu.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
yougetguide/com/aaron/youtu.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
# _*_ encoding:utf-8 _*_
"""
pip install you-get
https://you-get.org/
"""
__author__ = "aaron.qiu"
from you_get.common import any_download
from you_get.__main__ import main_dev
if __name__ == '__main__':
any_download(url="https://www.youtube.com/watch?v=QuPiZ86EFhQ", output_dir="E:\\", merge="E:\\")
# any_download(url="https://www.youtube.com/watch?v=jNQXAC9IVRw", output_dir="E:\\", merge="E:\\")
| 29.285714 | 102 | 0.692683 |
61271b84835e262c10e8b559fc1fa612e169a55f
| 2,122 |
py
|
Python
|
doc/for_dev/scikit-image/setup_codes/_greyreconstruct_reconstruction_loop.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/for_dev/scikit-image/setup_codes/_greyreconstruct_reconstruction_loop.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/for_dev/scikit-image/setup_codes/_greyreconstruct_reconstruction_loop.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import numpy as np
from future._greyreconstruct import reconstruction_loop
from skimage.filters._rank_order import rank_order
y, x = np.mgrid[:20:0.5, :20:0.5]
bumps = np.sin(x) + np.sin(y)
h = 0.3
seed = bumps - h
mask = bumps
assert tuple(seed.shape) == tuple(mask.shape)
selem = np.ones([3] * seed.ndim, dtype=bool)
offset = np.array([d // 2 for d in selem.shape])
# Cross out the center of the selem
selem[tuple(slice(d, d + 1) for d in offset)] = False
# Make padding for edges of reconstructed image so we can ignore boundaries
dims = np.zeros(seed.ndim + 1, dtype=int)
dims[1:] = np.array(seed.shape) + (np.array(selem.shape) - 1)
dims[0] = 2
inside_slices = tuple(slice(o, o + s) for o, s in zip(offset, seed.shape))
# Set padded region to minimum image intensity and mask along first axis so
# we can interleave image and mask pixels when sorting.
pad_value = np.min(seed)
images = np.full(dims, pad_value, dtype="float64")
images[(0, *inside_slices)] = seed
images[(1, *inside_slices)] = mask
# Create a list of strides across the array to get the neighbors within
# a flattened array
value_stride = np.array(images.strides[1:]) // images.dtype.itemsize
image_stride = np.int64(images.strides[0] // images.dtype.itemsize)
selem_mgrid = np.mgrid[[slice(-o, d - o) for d, o in zip(selem.shape, offset)]]
selem_offsets = selem_mgrid[:, selem].transpose()
nb_strides = np.array(
[np.sum(value_stride * selem_offset) for selem_offset in selem_offsets],
np.int32,
)
images = images.flatten()
# Erosion goes smallest to largest; dilation goes largest to smallest.
index_sorted = np.argsort(images).astype(np.int32)
index_sorted = index_sorted[::-1]
# Make a linked list of pixels sorted by value. -1 is the list terminator.
prev = np.full(len(images), -1, np.int32)
next_ = np.full(len(images), -1, np.int32)
prev[index_sorted[1:]] = index_sorted[:-1]
next_[index_sorted[:-1]] = index_sorted[1:]
# Cython inner-loop compares the rank of pixel values.
value_rank, value_map = rank_order(images)
start = index_sorted[0]
ranks = np.array(value_rank)
strides = nb_strides
current_idx = np.int64(start)
| 33.68254 | 79 | 0.724317 |
b64ab9777b46eab4ee60cf54089c1834113bf065
| 154 |
py
|
Python
|
rmpflow_using_class/world.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
rmpflow_using_class/world.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
rmpflow_using_class/world.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
"""世界クラス"""
import numpy as np
class World:
def __init__(self):
pass
if __name__ == '__main__':
pass
| 9.058824 | 27 | 0.461039 |
b6938ad851ae1550dcb14a69fef08b345fac93c0
| 2,358 |
py
|
Python
|
pyBarcode-0.6/barcode/__init__.py
|
fau-fablab/etiketten
|
5f4fcdd14001807e183164ae66df48d86c8f7a3e
|
[
"Unlicense"
] | 1 |
2018-12-15T04:07:38.000Z
|
2018-12-15T04:07:38.000Z
|
pyBarcode-0.6/barcode/__init__.py
|
fau-fablab/etiketten
|
5f4fcdd14001807e183164ae66df48d86c8f7a3e
|
[
"Unlicense"
] | 14 |
2015-01-02T10:37:33.000Z
|
2021-10-03T13:42:15.000Z
|
pyBarcode-0.6/barcode/__init__.py
|
fau-fablab/etiketten
|
5f4fcdd14001807e183164ae66df48d86c8f7a3e
|
[
"Unlicense"
] | 1 |
2018-12-15T04:07:40.000Z
|
2018-12-15T04:07:40.000Z
|
# -*- coding: utf-8 -*-
"""
pyBarcode
=========
This package provides a simple way to create standard barcodes.
It needs no external packages to be installed, the barcodes are
created as SVG objects. If PIL (Python Imaging Library) is
installed, the barcodes can also be rendered as images (all
formats supported by PIL).
"""
__project__ = 'pyBarcode'
__author__ = 'Thorsten Weimann'
__copyright__ = '2010-2011, ' + __author__
__author_email__ = '[email protected]'
__description__ = ('Create standard barcodes with Python. No external '
'modules needed (optional PIL support included).')
__version__ = '0.6'
__release__ = '0.6'
__license__ = 'MIT'
__url__ = 'http://bitbucket.org/whitie/pybarcode/'
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Graphics',
]
from barcode.errors import BarcodeNotFoundError
from barcode.codex import Code39, PZN
from barcode.ean import EAN8, EAN13, JAN
from barcode.isxn import ISBN10, ISBN13, ISSN
from barcode.upc import UPCA
__BARCODE_MAP = dict(
ean8=EAN8,
ean13=EAN13,
ean=EAN13,
gtin=EAN13,
jan=JAN,
upc=UPCA,
upca=UPCA,
isbn=ISBN13,
isbn13=ISBN13,
gs1=ISBN13,
isbn10=ISBN10,
issn=ISSN,
code39=Code39,
pzn=PZN,
)
PROVIDED_BARCODES = __BARCODE_MAP.keys()
PROVIDED_BARCODES.sort()
def get_barcode(name, code=None, writer=None):
try:
barcode = __BARCODE_MAP[name.lower()]
except KeyError:
raise BarcodeNotFoundError('The barcode {0!r} you requested is not '
'known.'.format(name))
if code is not None:
return barcode(code, writer)
else:
return barcode
def get_barcode_class(name):
return get_barcode(name)
def generate(name, code, writer=None, output=None, writer_options=None):
options = writer_options or {}
barcode = get_barcode(name, code, writer)
if isinstance(output, basestring):
fullname = barcode.save(output, options)
return fullname
else:
barcode.write(output, options)
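# Illustrative usage sketch (not part of the original module; the writer
# argument is optional and falls back to the bundled SVG writer):
# >>> ean = get_barcode('ean13', '5901234123457')
# >>> fullname = ean.save('ean13_barcode')  # writes ean13_barcode.svg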
| 26.2 | 76 | 0.678117 |
fcb883962e19f13abc3f5f56595d2598d41a13a9
| 773 |
py
|
Python
|
challenges/specialPolynomial/python3/specialPolynomial.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 5 |
2020-05-21T03:02:34.000Z
|
2021-09-06T04:24:26.000Z
|
challenges/specialPolynomial/python3/specialPolynomial.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 6 |
2019-04-24T03:39:26.000Z
|
2019-05-03T02:10:59.000Z
|
challenges/specialPolynomial/python3/specialPolynomial.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 1 |
2021-09-06T04:24:27.000Z
|
2021-09-06T04:24:27.000Z
|
def specialPolynomial(x, n):
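# Returns the largest k such that 1 + x + x**2 + ... + x**k <= n.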
s = 1
k = 0
while s <= n:
k += 1
s += x**k
return k-1
if __name__ == '__main__':
input0 = [2, 10, 1, 3]
input1 = [5, 111111110, 100, 140]
expectedOutput = [1, 7, 99, 4]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = specialPolynomial(input0[i], input1[i])
assert actual == expected, 'specialPolynomial({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
| 42.944444 | 132 | 0.658473 |
fcbf602d803b6a54b3266317d31336074db58fce
| 4,933 |
py
|
Python
|
NucleusDataset.py
|
ramesh152/SegmentationExperiment
|
b52dbc57759c086155e79c6621e6d25a4db0e974
|
[
"Apache-2.0"
] | 2 |
2019-06-12T13:53:00.000Z
|
2020-04-29T13:11:12.000Z
|
NucleusDataset.py
|
ramesh152/SegmentationExperiment
|
b52dbc57759c086155e79c6621e6d25a4db0e974
|
[
"Apache-2.0"
] | 1 |
2019-12-10T12:23:06.000Z
|
2019-12-10T12:23:06.000Z
|
NucleusDataset.py
|
ramesh152/SegmentationExperiment
|
b52dbc57759c086155e79c6621e6d25a4db0e974
|
[
"Apache-2.0"
] | 1 |
2020-08-05T18:12:24.000Z
|
2020-08-05T18:12:24.000Z
|
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
import torch.nn as nn
import numpy as np
import cv2
import os.path as osp
from glob import glob
from tqdm import tqdm
class NucleusDataset(Dataset):
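# Loads nucleus image/mask pairs from <root_dir>/<taskname>/imagesTr and
# masksTr; the `mode` argument selects which split (train/val/test) is read
# into memory.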
def __init__(self, root_dir, train=True, transform=None, target_transform=None, mode ="train",
do_reshuffle=True, keys=None,taskname = None,batch_size=16, num_batches=10000000, seed=None):
self.root_dir = root_dir
self.transform = transform
self.target_transform = target_transform
self.train = train
self.taskname = taskname
self.image_names = keys
self.mode = mode
self.data_len = len(self.image_names)
self.batch_size = batch_size
self.num_batches = min((self.data_len // self.batch_size)+10, num_batches)
dataDir = "imagesTr"
maskDir = "masksTr"
suffix =".png"
print("root_dir :",root_dir, " taskname : ",taskname,"self.mode :",self.mode)
print(" path : ",osp.join(self.root_dir, taskname))
if not self._check_task_exists():
raise RuntimeError("Task does not exist")
if self.mode=="train":
#self.image_names = os.listdir(os.path.join(self.root_dir, "train",dataDir))
#print("train image_names :",self.image_names)
self.train_data = []
self.train_labels = []
for image in self.image_names :
train_img = cv2.imread(osp.join(self.root_dir,self.taskname,dataDir,image+suffix))
#print("image path: ",osp.join(self.root_dir,self.taskname,dataDir,image+suffix))
self.train_data.append(train_img)
target_img = np.zeros(train_img.shape[:2], dtype=np.uint8)
target_img_ = cv2.imread(osp.join(self.root_dir,taskname,maskDir,image+suffix),0)
target_img = np.maximum(target_img, target_img_)
self.train_labels.append(target_img)
elif self.mode =="val":
#self.image_names = os.listdir(osp.join(self.root_dir, "test",dataDir))
self.val_data = []
self.val_labels = []
for image in self.image_names:
#print(" Val image_names :",self.image_names)
val_img = cv2.imread(osp.join(self.root_dir,self.taskname,dataDir,image+suffix))
self.val_data.append(val_img)
val_target_img = np.zeros(val_img.shape[:2], dtype=np.uint8)
val_target_img_ = cv2.imread(osp.join(self.root_dir,self.taskname,maskDir,image+suffix),0)
val_target_img = np.maximum(val_target_img, val_target_img_)
self.val_labels.append(val_target_img)
else :
self.test_data = []
self.test_labels = []
for image in self.image_names:
#print(" Test image_names :",self.image_names)
test_img = cv2.imread(osp.join(self.root_dir,taskname,dataDir,image+suffix))
self.test_data.append(test_img)
test_target_img = np.zeros(test_img.shape[:2], dtype=np.uint8)
test_target_img_ = cv2.imread(osp.join(self.root_dir,taskname,maskDir,image+suffix),0)
test_target_img = np.maximum(test_target_img, test_target_img_)
self.test_labels.append(test_target_img)
def __len__(self):
return len(self.image_names)
def __getitem__(self, item):
if self.mode=="train":
image, mask = self.train_data[item], self.train_labels[item]
if self.transform:
image = self.transform(image)
if self.target_transform:
mask = self.target_transform(mask)
return image, mask
elif self.mode=="val":
image, mask = self.val_data[item], self.val_labels[item]
if self.transform:
image = self.transform(image)
if self.target_transform:
mask = self.target_transform(mask)
return image, mask
else:
image, mask = self.test_data[item], self.test_labels[item]
if self.transform:
image = self.transform(image)
if self.target_transform:
mask = self.target_transform(mask)
return image, mask
def _check_exists(self):
return osp.exists(osp.join(self.root_dir, "train")) and osp.exists(osp.join(self.root_dir, "test"))
def _check_task_exists(self):
return osp.exists(osp.join(self.root_dir, self.taskname))
| 38.539063 | 111 | 0.59254 |
781ffedcf4eb6caa8472cb0a95889aa460aacf54
| 4,187 |
py
|
Python
|
TimeBomb/plugin.py
|
jztech101/Loggy
|
1883a09ebbb6c8120cae648aebeb2771e7f39d9b
|
[
"MIT"
] | null | null | null |
TimeBomb/plugin.py
|
jztech101/Loggy
|
1883a09ebbb6c8120cae648aebeb2771e7f39d9b
|
[
"MIT"
] | 6 |
2018-02-09T18:39:37.000Z
|
2018-08-31T18:40:02.000Z
|
TimeBomb/plugin.py
|
jztech101/Loggy
|
1883a09ebbb6c8120cae648aebeb2771e7f39d9b
|
[
"MIT"
] | null | null | null |
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.schedule as schedule
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import random
import time
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('RandCMDs')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x: x
def isChan(chan, checkprefix):
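# A channel either starts with '#' or, when checkprefix is set, with a single
# status-prefix character (e.g. '@' or '+') followed by '#'.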
if not chan:
return False
elif chan.startswith("#"):
return True
elif checkprefix and len(chan) >= 2 and not chan[0].isalnum() and chan[1] == "#":
return True
else:
return False
def getHostname(nick, irc):
return ircutils.hostFromHostmask(irc.state.nickToHostmask(nick))
class TimeBomb(callbacks.Plugin):
"""TimeBomb"""
def __init__(self, irc):
self.__parent = super(TimeBomb, self)
self.__parent.__init__(irc)
self.bomb = False
self.bombtarget = ""
self.chan = ""
self.sender = ""
self.rng = random.Random()
self.rng.seed()
self.goodWire = ""
self.wires = ['Blue', 'Green', 'Red', 'Yellow', 'Pink', 'Purple', 'Orange', 'Black', 'Gray', 'White', 'Brown']
def timebomb(self, irc, msg, args, something):
"""TimeBomb"""
if not isChan(msg.args[0], True) or not self.registryValue("bombsEnabled",msg.args[0]):
return
elif self.bomb:
irc.reply("A bomb is already running with seconds on the clock, please wait until it explodes (or gets defused)")
return
nick = msg.nick
if len(something) > 0 and something[0] in irc.state.channels[msg.args[0]].users and something[0] != irc.nick:
nick = something[0]
if self.registryValue("bombsExempt",msg.args[0]):
for x in self.registryValue("bombsExempt",msg.args[0]).split(","):
if x == getHostname(nick, irc):
nick = msg.nick
break
self.bombtarget = nick
self.bomb = True
self.sender = msg.nick
self.goodWire = self.rng.choice(self.wires)
logChannel = self.registryValue('logChan')
self.chan = msg.args[0]
if logChannel:
irc.queueMsg(ircmsgs.privmsg(logChannel, "[TimeBomb] " + msg.prefix + ': (' + self.chan + ') A bomb has been shoved down ' + self.bombtarget + '\'s underwear, the good wire is the ' + self.goodWire + ' one'))
irc.reply('A bomb has been shoved inside ' + self.bombtarget + '\'s underwear, it will detonate in 60 seconds. These are the wires: ' + ' '.join(self.wires))
#schedule.removeEvent('detonate')
schedule.addEvent(self.detonate, time.time() + 59, 'detonate', [irc, msg])
timebomb = wrap(timebomb, [optional(many('something'))])
def cut(self, irc, msg, args, something):
"""cut"""
if not self.bomb:
return
if msg.nick == self.bombtarget:
if len(something) > 0 and self.goodWire.lower() == something[0].lower():
self.bombtarget = self.sender
self.sender = msg.nick
irc.reply("The bomb has been defused and is sent back with seconds on the clock!")
else:
self.detonate(irc, msg)
else:
self.bombtarget = msg.nick
irc.reply("Uh-Oh. The bomb has suddenly moved itself into " + self.bombtarget + '\'s underwear')
cut = wrap(cut, [optional(many('something'))])
def detonate(self, irc, msg):
""" detonate """
if self.bomb:
if not irc.state.channels[self.chan].isOp(irc.nick) and self.registryValue("bombDefenseEnabled", msg.args[0]):
irc.sendMsg(ircmsgs.privmsg("chanserv", "op " + self.chan))
schedule.addEvent(irc.queueMsg, time.time() + 1, 'detonating', [ircmsgs.kick(self.chan, self.bombtarget, "KA-BOOOOOOOOOOOOOOOOM!")])
self.bomb = False
schedule.removeEvent('detonate')
Class = TimeBomb
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 42.292929 | 220 | 0.615238 |
1589bd8034ad9ff8a7b1cd9fe23fc9670a853980
| 1,181 |
py
|
Python
|
backend/python/flask_app/app.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | 1 |
2021-02-08T17:07:54.000Z
|
2021-02-08T17:07:54.000Z
|
backend/python/flask_app/app.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | null | null | null |
backend/python/flask_app/app.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | 1 |
2021-02-08T17:08:01.000Z
|
2021-02-08T17:08:01.000Z
|
from flask import Flask, request, make_response, jsonify
from flask_cors import CORS
from sqlalchemy import create_engine
from controller import PostController, response_with_status_code
import sys, os
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../dao/src"))
from Exception import IllegalArgumentException, UnknownElementException
from PostDAO import PostDAO
from Post import Post
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
CORS(app)
postDAO = PostDAO(create_engine("postgres://root:root@database:5432/blog"))
controller = PostController(postDAO)
@app.route("/api/blog/post", methods=['GET', 'POST'])
def post():
if request.method == 'GET':
return controller.fetchAllPost()
elif request.method == 'POST':
return controller.createPost()
else:
return response_with_status_code(404)
@app.route("/api/blog/post/<int:postId>", methods=['PUT', 'DELETE'])
def post_by_id(postId):
if request.method == 'PUT':
return controller.updatePost(postId)
elif request.method == 'DELETE':
return controller.deletePost(postId)
else:
return response_with_status_code(404)
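# Illustrative requests against the routes above (assuming Flask's default
# port 5000):
# curl http://localhost:5000/api/blog/post
# curl -X DELETE http://localhost:5000/api/blog/post/1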
if __name__ == "__main__":
app.run(host = '0.0.0.0')
| 31.918919 | 75 | 0.750212 |
15bd4274e6d8fdce9fe3d2528a023fbdda7c53b4
| 384 |
py
|
Python
|
hello/hello_ui/hello_ttk.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
hello/hello_ui/hello_ttk.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
hello/hello_ui/hello_ttk.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter.ttk
import tkinter
root = tkinter.Tk()
tkinter.ttk.Style().configure("TButton", padding=6, relief="flat",
background="#ccc")
count = 0
def show_hello():
tkinter.ttk.Label(text="Hello World!!").pack()
btn = tkinter.ttk.Button(text="Sample", command=show_hello)
btn.pack()
root.mainloop()
| 16 | 66 | 0.640625 |
01a7d0637b5eebf039bcaa642b8fb40f2dbc88ec
| 8,657 |
py
|
Python
|
py_script/test_question_recall.py
|
skybluezx/GraphCompute
|
95dc176147a7a16eb2176fd1f384de467e3daad6
|
[
"MIT"
] | 4 |
2021-07-29T11:32:34.000Z
|
2021-08-12T07:13:25.000Z
|
py_script/test_question_recall.py
|
skybluezx/GraphCompute
|
95dc176147a7a16eb2176fd1f384de467e3daad6
|
[
"MIT"
] | null | null | null |
py_script/test_question_recall.py
|
skybluezx/GraphCompute
|
95dc176147a7a16eb2176fd1f384de467e3daad6
|
[
"MIT"
] | 1 |
2021-11-29T03:47:35.000Z
|
2021-11-29T03:47:35.000Z
|
import os
import json
def get_courseware_main_kp(kp_courseware_question_file_path):
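# Maps each courseware id to a pair of weighted seed lists: index 0 holds its
# knowledge points, index 1 its questions (all with weight 1.0).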
courseware_main_kp_list = dict()
with open(kp_courseware_question_file_path, 'r') as f:
for line in f:
json_object = json.loads(line)
if json_object['courseware_id'] not in courseware_main_kp_list:
courseware_main_kp_list[json_object['courseware_id']] = list()
courseware_main_kp_list[json_object['courseware_id']].append(list())
courseware_main_kp_list[json_object['courseware_id']].append(list())
for question_id in json_object['question']:
courseware_main_kp_list[json_object['courseware_id']][1].append({'id': question_id, 'weight': 1.0})
courseware_main_kp_list[json_object['courseware_id']][0].append({'id': json_object['kp_id'], 'weight': 1.0})
return courseware_main_kp_list
def get_recall_task_json_from_kp(save_dir_path, courseware_main_kp_list, is_merge, stepDefine, auxiliaryEdge, walkLengthRatio, restartRatio, totalStepCount, isSplitStepCount, visitedCountTopN):
for cw in courseware_main_kp_list:
task_json = dict()
task_json["name"] = "multi_walk"
task_json["beginNodeType"] = ["KnowledgePoint", "Question"]
task_json["beginNodeID"] = list()
task_json["beginNodeID"].append(courseware_main_kp_list[cw][0])
task_json["beginNodeID"].append(courseware_main_kp_list[cw][1])
task_json["is_merge"] = is_merge
task_json["stepDefine"] = stepDefine
task_json["auxiliaryEdge"] = auxiliaryEdge
task_json["walkLengthRatio"] = walkLengthRatio
task_json["restartRatio"] = restartRatio
task_json["totalStepCount"] = totalStepCount
task_json["isSplitStepCount"] = isSplitStepCount
task_json["targetNodeType"] = "Question"
task_json["visitedCountTopN"] = visitedCountTopN
# print(task_json)
# break
with open(save_dir_path + "/" + cw + '.json', 'w') as f:
json.dump(task_json, f)
def get_recall_task_json_from_course(save_dir_path, course_id_list, course_request_node_list, is_merge, stepDefine, auxiliaryEdge, walkLengthRatio, restartRatio, totalStepCount, isSplitStepCount, visitedCountTopN):
for course in course_request_node_list:
if course not in course_id_list:
continue
task_json = dict()
task_json["name"] = "multi_walk"
task_json["beginNodeType"] = ["KnowledgePoint", "Question"]
task_json["beginNodeID"] = list()
if 'kp' in course_request_node_list[course]:
task_json["beginNodeID"].append(course_request_node_list[course]['kp'])
else:
task_json["beginNodeID"].append(list())
if 'question' in course_request_node_list[course]:
task_json["beginNodeID"].append(course_request_node_list[course]['question'])
else:
task_json["beginNodeID"].append(list())
task_json["is_merge"] = is_merge
task_json["stepDefine"] = stepDefine
task_json["auxiliaryEdge"] = auxiliaryEdge
task_json["walkLengthRatio"] = walkLengthRatio
task_json["restartRatio"] = restartRatio
task_json["totalStepCount"] = totalStepCount
task_json["isSplitStepCount"] = isSplitStepCount
task_json["targetNodeType"] = "Question"
task_json["visitedCountTopN"] = visitedCountTopN
# print(task_json)
# break
with open(save_dir_path + "/" + course + '.json', 'w') as f:
json.dump(task_json, f)
def recall_from_file(recall_bin_path, save_dir_path, course_kp_list):
# Iterate over all coursewares
for cw in course_kp_list:
recall_command_json_file_path = save_dir_path + "/" + cw + '.json'
# print(recall_command_json_file_path)
command = recall_bin_path + ' --server_name=main --json=' + recall_command_json_file_path
print(command)
os.system(command)
break
def get_question_mastery_dict(question_mastery_file_path):
question_mastery_dict = dict()
with open(question_mastery_file_path, 'r') as f:
obj = json.load(f)
for course in obj:
question_mastery_dict[course] = list()
data_list = obj[course]['data']
for data in data_list:
question_mastery_dict[course].append({'id': data['que_id'], 'weight': float(data['que_mastery_level'])})
return question_mastery_dict
def get_kp_mastery_dict(kp_mastery_file_path):
kp_mastery_dict = dict()
with open(kp_mastery_file_path, 'r') as f:
for line in f:
if line.strip() == "":
continue
obj = json.loads(line)
for course in obj:
kp_mastery_dict[course] = list()
for kp in obj[course]:
kp_mastery_dict[course].append({'id': kp, 'weight': obj[course][kp]})
return kp_mastery_dict
def question_and_kp_merge(question_mastery_dict, kp_mastery_dict):
request_node_list = dict()
for course in question_mastery_dict:
if course not in request_node_list:
request_node_list[course] = dict()
request_node_list[course]['question'] = question_mastery_dict[course]
for course in kp_mastery_dict:
if course not in request_node_list:
request_node_list[course] = dict()
request_node_list[course]['kp'] = kp_mastery_dict[course]
return request_node_list
if __name__ == "__main__":
# Path to the client binary
recall_bin_path = '/Users/zhaixiao/workplace/c_cpp/GraphCompute/build/bin/macos-clang-12.0.5/GraphComputeClient'
# Path to the test knowledge point-courseware-question file
test_kp_courseware_question_file_path = '/Users/zhaixiao/workplace/c_cpp/GraphCompute/build/test_data/kp_courseware_question.json'
# Path to the test question-mastery file
test_question_mastery_file_path = '/Users/zhaixiao/workplace/c_cpp/GraphCompute/build/test_data/question_mastery.json'
# Path to the test knowledge-point-mastery file
test_kp_mastery_file_path = '/Users/zhaixiao/workplace/c_cpp/GraphCompute/build/test_data/kp_mastery.dat'
# Directory for the generated recall-task JSON files
task_json_save_dir_path = '/Users/zhaixiao/workplace/c_cpp/GraphCompute/build/test_data/test_course_task_json'
# # Get the recall knowledge points and recall questions for every courseware
# course_kp_list = get_courseware_main_kp(test_kp_courseware_question_file_path)
# Load all question-mastery data
question_mastery_dict = get_question_mastery_dict(test_question_mastery_file_path)
# Load all knowledge-point-mastery data
kp_mastery_dict = get_kp_mastery_dict(test_kp_mastery_file_path)
course_request_node_list = question_and_kp_merge(question_mastery_dict, kp_mastery_dict)
# print(course_request_node_list)
is_merge = True
stepDefine = [
["KnowledgePoint", "Question", "Courseware"],
["KnowledgePoint", "Question", "Courseware"]
]
auxiliaryEdge = [
# {"Question": "Courseware"},
# {"Question": "Courseware"},
]
walkLengthRatio = [0.0, 0.0]
restartRatio = [0.02, 0.02]
totalStepCount = [100000, 100000]
isSplitStepCount = [False, False]
visitedCountTopN = 100
# get_recall_task_json_from_kp(task_json_save_dir_path,
# course_kp_list,
# is_merge,
# stepDefine,
# auxiliaryEdge,
# walkLengthRatio,
# restartRatio,
# totalStepCount,
# isSplitStepCount,
# visitedCountTopN)
course_id_list = ['1685524',
'9911711',
'10381553',
'6086788',
'1644252',
'1627344',
'1652910',
'1689361',
'1630150',
'10490049']
# get_recall_task_json_from_course(task_json_save_dir_path,
# course_id_list,
# course_request_node_list,
# is_merge,
# stepDefine,
# auxiliaryEdge,
# walkLengthRatio,
# restartRatio,
# totalStepCount,
# isSplitStepCount,
# visitedCountTopN)
# recall_from_file(recall_bin_path, task_json_save_dir_path, course_kp_list)
| 36.682203 | 214 | 0.617881 |
bf26e2fcf5aa6ab695cba1677a162a9790060f1d
| 4,468 |
py
|
Python
|
practices/practice_2/task4.2.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
practices/practice_2/task4.2.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
practices/practice_2/task4.2.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
from libs.can import CANSocket
from libs.myactuator import MyActuator
from time import perf_counter, sleep
import numpy as np
# import getpass
# password = getpass.getpass()
# the serial port of device
# you may find one by examing /dev/ folder,
# this is usually devices ttyACM
# os.system(f"sudo slcand -o -c -s8 /dev/serial/by-id/usb-Protofusion_Labs_CANable_8c005eb_https\:__github.com_normaldotcom_cantact-fw.git_001D00335734570920343135-if00 can0")
serial_device = "ttyACM1"
# Initiate the can bus socket
can_bus = CANSocket(serial_port=serial_device)
# Initiate motor
motor = MyActuator(can_bus=can_bus)
# Set the control loop timings
frequency = 1000
sampling_time = 1 / frequency
def stop_motor(motor):
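# Send a burst of zero-current commands so the motor reliably comes to rest.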
for _ in range(100):
motor.set_current(0)
# total working time
T = 3
N = T * frequency
# gains = [20]
gains = [20,50,70]
g_n = len(gains)
# sin_amplitudes = [2]
sin_amplitudes = [2, 6]
# sin_frequencies = [10]
sin_frequencies = [1, 7]
amp_n = len(sin_amplitudes)
freq_n = len(sin_frequencies)
angles = np.zeros((amp_n, freq_n, g_n, N))
velocities = np.zeros((amp_n, freq_n, g_n, N))
times = np.zeros(N)
angle_initial = 2
velocity_desired = 0
angle_desired = np.zeros((amp_n, freq_n, N))
motor.set_current(0)
try:
for amp in range(amp_n):
for freq in range(freq_n):
for k in range(g_n):
i = 0
last_execution = 0
control = 0
# find the global time before entering control loop
initial_time = perf_counter()
# motor.set_zero()
initial_angle = motor.state["angle"] + angle_initial
while True:
time = perf_counter() - initial_time # get actual time in secs
# /////////////////////////
# Get and parse motor state
# /////////////////////////
state = motor.state
theta = state["angle"] - initial_angle
dtheta = state["speed"]
current = state["current"]
# ///////////////////////////////////////////
# Update the control only on specific timings
# ///////////////////////////////////////////
# P-control
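# The desired angle follows a sine wave; the current command is proportional
# to the tracking error (theta - current_desired).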
if (time - last_execution) >= sampling_time:
if i >= N:
break
angles[amp, freq, k, i] = theta
velocities[amp, freq, k, i] = dtheta
times[i] = time
current_desired = sin_amplitudes[amp] * np.sin(sin_frequencies[freq] * time)
angle_desired[amp, freq, i] = current_desired
control = -gains[k] * (theta - current_desired)
i += 1
last_execution = time
# YOUR CONTROLLER GOES HERE
motor.set_current(control)
stop_motor(motor)
sleep(1)
except KeyboardInterrupt:
stop_motor(motor)
print("Disabled by interrupt")
motor = None
import matplotlib.pyplot as plt
fig, ax = plt.subplots(amp_n * freq_n, 2, figsize=(16, amp_n*freq_n*5))
bound = 1
last_n = 10
def add_plot(ax, x, ts, gain):
ax.plot(
ts,
x,
label=f"gain: {gain}",
)
for amp in range(amp_n):
for freq in range(freq_n):
ax0 = ax[amp * freq_n + freq, 0]
ax1 = ax[amp * freq_n + freq, 1]
ax0.set_xlabel("t [s]")
ax0.set_ylabel("$\\theta$ [$rad$]")
ax1.set_xlabel("t [s]")
ax1.set_ylabel("$\\dot{\\theta}$ [$\\frac{rad}{s}$]")
for i in range(g_n):
add_plot(
ax=ax0,
x=angles[amp, freq, i],
ts=times,
gain=gains[i],
)
add_plot(
ax=ax1,
x=velocities[amp, freq, i],
ts=times,
gain=gains[i],
)
ax0.plot(times, angle_desired[amp, freq], label=f"$\\theta_{{desired}}$, amplitude: {sin_amplitudes[amp]}, frequency: {sin_frequencies[freq]} Hz")
ax0.legend()
ax1.legend()
fig.suptitle(f"control loop frequency = {frequency} Hz", fontsize="13")
fig.tight_layout(pad=3.0)
plt.savefig("./plots/4.2.png")
plt.show()
| 26.915663 | 175 | 0.520815 |
17445f5f07de989a85324937c557067a23958ba3
| 4,288 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/setup/setup_wizard/operations/defaults_setup.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/setup/setup_wizard/operations/defaults_setup.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/setup/setup_wizard/operations/defaults_setup.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, getdate
from frappe.core.doctype.communication.comment import add_info_comment
def set_default_settings(args):
# enable default currency
frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.update({
'current_fiscal_year': get_fy_details(args.get('fy_start_date'), args.get('fy_end_date')),
'default_currency': args.get('currency'),
'default_company': args.get('company_name'),
"country": args.get("country"),
})
global_defaults.save()
system_settings = frappe.get_doc("System Settings")
system_settings.email_footer_address = args.get("company_name")
system_settings.save()
domain_settings = frappe.get_single('Domain Settings')
domain_settings.set_active_domains(args.get('domains'))
stock_settings = frappe.get_doc("Stock Settings")
stock_settings.item_naming_by = "Item Code"
stock_settings.valuation_method = "FIFO"
stock_settings.default_warehouse = frappe.db.get_value('Warehouse', {'warehouse_name': _('Stores')})
stock_settings.stock_uom = _("Nos")
stock_settings.auto_indent = 1
stock_settings.auto_insert_price_list_rate_if_missing = 1
stock_settings.automatically_set_serial_nos_based_on_fifo = 1
stock_settings.save()
selling_settings = frappe.get_doc("Selling Settings")
selling_settings.cust_master_name = "Customer Name"
selling_settings.so_required = "No"
selling_settings.dn_required = "No"
selling_settings.allow_multiple_items = 1
selling_settings.save()
buying_settings = frappe.get_doc("Buying Settings")
buying_settings.supp_master_name = "Supplier Name"
buying_settings.po_required = "No"
buying_settings.pr_required = "No"
buying_settings.maintain_same_rate = 1
buying_settings.allow_multiple_items = 1
buying_settings.save()
notification_control = frappe.get_doc("Notification Control")
notification_control.quotation = 1
notification_control.sales_invoice = 1
notification_control.purchase_order = 1
notification_control.save()
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.save()
def set_no_copy_fields_in_variant_settings():
# set no copy fields of an item doctype to item variant settings
doc = frappe.get_doc('Item Variant Settings')
doc.set_default_fields()
doc.save()
def create_price_lists(args):
for pl_type, pl_name in (("Selling", _("Standard Selling")), ("Buying", _("Standard Buying"))):
frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": 1 if pl_type == "Buying" else 0,
"selling": 1 if pl_type == "Selling" else 0,
"currency": args["currency"]
}).insert()
def create_employee_for_self(args):
if frappe.session.user == 'Administrator':
return
# create employee for self
emp = frappe.get_doc({
"doctype": "Employee",
"employee_name": " ".join(filter(None, [args.get("first_name"), args.get("last_name")])),
"user_id": frappe.session.user,
"status": "Active",
"company": args.get("company_name")
})
emp.flags.ignore_mandatory = True
emp.insert(ignore_permissions = True)
def create_territories():
"""create two default territories, one for home country and one named Rest of the World"""
from frappe.utils.nestedset import get_root_of
country = frappe.db.get_default("country")
root_territory = get_root_of("Territory")
for name in (country, _("Rest Of The World")):
if name and not frappe.db.exists("Territory", name):
frappe.get_doc({
"doctype": "Territory",
"territory_name": name.replace("'", ""),
"parent_territory": root_territory,
"is_group": "No"
}).insert()
def create_feed_and_todo():
"""update Activity feed and create todo for creation of item, customer, vendor"""
add_info_comment(**{
"subject": _("ERPNext Setup Complete!")
})
def get_fy_details(fy_start_date, fy_end_date):
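# Returns the fiscal year label: "2020" when it ends within the start year,
# otherwise "2020-2021".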
start_year = getdate(fy_start_date).year
if start_year == getdate(fy_end_date).year:
fy = cstr(start_year)
else:
fy = cstr(start_year) + '-' + cstr(start_year + 1)
return fy
| 34.031746 | 101 | 0.752799 |
e53cac43abe6635fcf3bddc1958ddbbcf9d875a2
| 524 |
pyde
|
Python
|
sketches/ca01/ca01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/ca01/ca01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/ca01/ca01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
from cell import Cell
GRID_W = 51
GRID_H = 51
def setup():
global cellList
size(600, 600)
this.surface.setTitle("CA 1")
cellList = createCellList()
def draw():
for row in cellList:
for cell in row:
cell.display()
def createCellList():
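# Builds a GRID_W x GRID_H grid of Cell objects and switches on the centre cell.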
sz = width//GRID_W + 1
newList = []
for j in range(GRID_H):
newList.append([])
for i in range(GRID_W):
newList[j].append(Cell(i, j, sz))
newList[GRID_H//2][GRID_W//2].state = 1
return newList
| 20.153846 | 45 | 0.578244 |
e5a36e0147df5dbc6f2cc497ee2bbfccca7dcc8b
| 1,510 |
py
|
Python
|
数据结构/NowCode/16_FindFirstCommonNode.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | null | null | null |
数据结构/NowCode/16_FindFirstCommonNode.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 3 |
2020-08-14T07:50:27.000Z
|
2020-08-14T08:51:06.000Z
|
数据结构/NowCode/16_FindFirstCommonNode.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 2 |
2021-03-14T05:58:45.000Z
|
2021-08-29T17:25:52.000Z
|
# First common node of two linked lists
# Given two linked lists, find and return their first common node.
# Returns None when the lists share no node.
class Solution:
# First parameter takes the shorter list, second takes the longer one
# def findEqual(self,):
def FindFirstCommonNode(self, pHead1, pHead2):
# If the two input lists are literally the same list
if pHead1 == pHead2:
return pHead1
pTmp1 = pHead1
pTmp2 = pHead2
# Advance both pointers together until one of them runs off the end
while pTmp1 and pTmp2:
pTmp1 = pTmp1.next
pTmp2 = pTmp2.next
# Determine which list reached its end first
# If pTmp1 has not finished yet, the second list is the shorter one
if pTmp1:
k = 0
# Count the length difference k between the two lists
while pTmp1:
pTmp1 = pTmp1.next
k += 1
# Let pTmp1 start k steps ahead
pTmp2 = pHead2
pTmp1 = pHead1
for i in range(k):
pTmp1 = pTmp1.next
# When the two pointers meet, that node is the first common node
while pTmp1 != pTmp2:
pTmp1 = pTmp1.next
pTmp2 = pTmp2.next
return pTmp1
# If pTmp2 has not finished yet, the first list is the shorter one
if pTmp2:
k = 0
while pTmp2:
pTmp2 = pTmp2.next
k += 1
# Let pTmp2 start k steps ahead
pTmp2 = pHead2
pTmp1 = pHead1
for i in range(k):
pTmp2 = pTmp2.next
while pTmp1 != pTmp2:
pTmp1 = pTmp1.next
pTmp2 = pTmp2.next
return pTmp1
# Equal-length lists skip both branches above: walk from both heads in step
# until the pointers meet (both become None if no node is shared)
pTmp1 = pHead1
pTmp2 = pHead2
while pTmp1 != pTmp2:
pTmp1 = pTmp1.next
pTmp2 = pTmp2.next
return pTmp1
if __name__ == '__main__':
print()
| 23.968254 | 61 | 0.491391 |
f91e5410497abb871d44b4b26fcd319dca905de1
| 561 |
py
|
Python
|
python/pubmed_test.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 89 |
2015-02-13T13:46:06.000Z
|
2022-03-13T16:42:44.000Z
|
python/pubmed_test.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 91 |
2015-03-12T13:31:36.000Z
|
2022-01-14T07:37:37.000Z
|
python/pubmed_test.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 138 |
2015-03-04T15:23:43.000Z
|
2022-03-09T15:11:52.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Look up a DOI in PubMed.
This script looks up a DOI in pubmed.
"""
import argparse
from openapc_toolkit import get_metadata_from_pubmed as gmfp
def main():
parser = argparse.ArgumentParser()
parser.add_argument("doi", help="A DOI to look up in pubmed.")
args = parser.parse_args()
res = gmfp(args.doi)
if res["success"]:
for key, value in res["data"].items():
print(key, ":", value)
else:
print(res["error_msg"])
if __name__ == '__main__':
main()
| 20.035714 | 66 | 0.622103 |
00818ae4d66705777029ed38b458a986cd50e250
| 6,242 |
py
|
Python
|
test/test_npu/test_network_ops/test_multilabel_margin_loss.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_multilabel_margin_loss.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_multilabel_margin_loss.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
from itertools import repeat, product
class TestMultilabelMarginLoss(TestCase):
def cpu_op_exec(self, data, target, reduction):
output = torch.nn.functional.multilabel_margin_loss(input=data, target=target, reduction=reduction)
output = output.to("cpu")
output = output.detach().numpy()
return output
def npu_op_exec(self, data, target, reduction):
output = torch.nn.functional.multilabel_margin_loss(input=data, target=target, reduction=reduction)
output = output.to("cpu")
output = output.to(torch.float32)
output = output.detach().numpy()
return output
def cpu_op_exec_out(self, data, target, c, reduction):
output = torch._C._nn.multilabel_margin_loss(input=data, target=target, reduction=reduction, out=c)
output = output.to("cpu")
output = output.detach().numpy()
return output
def npu_op_exec_out(self, data, target, c, reduction):
output = torch._C._nn.multilabel_margin_loss(input=data, target=target, reduction=reduction, out=c)
output = output.to("cpu")
output = output.detach().numpy()
return output
def test_multilabel_margin_loss_1(self, device):
data = torch.Tensor([[0.1, 0.2, 0.4, 0.8], [0.1, 0.2, 0.4, 0.8]]).to(torch.float32)
target = torch.Tensor([[3, 0, -1, 1], [0, 1, 3, -1]]).to(torch.int64)
for reduction in ["mean", "none", "sum"]:
data_npu = data.to("npu")
target_npu = target.to(torch.int32).to("npu")
cpu_output = self.cpu_op_exec(data, target, reduction)
npu_output = self.npu_op_exec(data_npu, target_npu, reduction)
self.assertRtolEqual(cpu_output, npu_output)
def test_multilabel_margin_loss_2(self, device):
data = torch.Tensor([[0.1, 0.2, 0.4, 0.8], [0.1, 0.2, 0.4, 0.8]]).to(torch.float32)
target = torch.Tensor([[1, 1, 1, 1], [1, 1, 1, 1]]).to(torch.int64)
for reduction in ["mean", "none", "sum"]:
data_npu = data.to("npu")
target_npu = target.to(torch.int32).to("npu")
cpu_output = self.cpu_op_exec(data, target, reduction)
npu_output = self.npu_op_exec(data_npu, target_npu, reduction)
self.assertRtolEqual(cpu_output, npu_output)
def test_multilabel_margin_loss_3(self, device):
data = torch.Tensor([[0.1, 0.2, 0.4, 0.8, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.2, 0.4, 0.8, 0.1, 0.1, 0.1, 0.1, 0.1]]).to(torch.float32)
target = torch.Tensor([[3, 0, 7, 8, 1, -1, 1, 2, 2], [4, 5, -1, 1, 1, 1, 1, 2, 2]]).to(torch.int64)
for reduction in ["mean", "none", "sum"]:
data_npu = data.to("npu")
target_npu = target.to(torch.int32).to("npu")
cpu_output = self.cpu_op_exec(data, target, reduction)
npu_output = self.npu_op_exec(data_npu, target_npu, reduction)
self.assertRtolEqual(cpu_output, npu_output)
def test_multilabel_margin_loss_out(self, device):
data = torch.tensor([[-0.4191, 0.6214],
[-0.3765, -0.4781],
[ 0.2881, 0.4888]]).to(torch.float32)
target = torch.tensor([[ 1, -1],
[ 0, -1],
[ 1, -1]]).to(torch.int64)
for reduction in range(3):
data_npu = data.to("npu")
target_npu = target.to(torch.int32).to("npu")
c = torch.randn(1, 2, 3).float()
cpu_output = self.cpu_op_exec_out(data, target, c, reduction)
c = torch.randn(1, 2, 3).float()
c_npu = c.to("npu")
npu_output = self.npu_op_exec_out(data_npu, target_npu, c_npu, reduction)
self.assertRtolEqual(cpu_output, npu_output)
def test_multilabel_margin_loss_float16_1(self, device):
data = torch.Tensor([[0.1, 0.2, 0.4, 0.8], [0.1, 0.2, 0.4, 0.8]]).to(torch.float32)
target = torch.Tensor([[3, 0, -1, 1], [0, 1, 3, -1]]).to(torch.int64)
for reduction in ["mean", "none", "sum"]:
data_npu = data.to(torch.float16).to("npu")
target_npu = target.to(torch.int32).to("npu")
cpu_output = self.cpu_op_exec(data, target, reduction)
npu_output = self.npu_op_exec(data_npu, target_npu, reduction)
cpu_output = cpu_output.astype(np.float16)
npu_output = npu_output.astype(np.float16)
self.assertRtolEqual(cpu_output, npu_output)
def test_multilabel_margin_loss_float16_2(self, device):
data = torch.Tensor([[0.1, 0.2, 0.4, 0.8, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.2, 0.4, 0.8, 0.1, 0.1, 0.1, 0.1, 0.1]]).to(torch.float32)
target = torch.Tensor([[3, 0, 7, 8, 1, -1, 1, 2, 2], [4, 5, -1, 1, 1, 1, 1, 2, 2]]).to(torch.int64)
for reduction in ["mean", "none", "sum"]:
data_npu = data.to(torch.float16).to("npu")
target_npu = target.to(torch.int32).to("npu")
cpu_output = self.cpu_op_exec(data, target, reduction)
npu_output = self.npu_op_exec(data_npu, target_npu, reduction)
cpu_output = cpu_output.astype(np.float16)
npu_output = npu_output.astype(np.float16)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestMultilabelMarginLoss, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 46.237037 | 141 | 0.618392 |
009ff893a246abac723c2453f610f4f157cd7c08
| 395 |
py
|
Python
|
flask/tutorial-1-3-router-blueprint/users/views.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
flask/tutorial-1-3-router-blueprint/users/views.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
flask/tutorial-1-3-router-blueprint/users/views.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
# https://github.com/gothinkster/flask-realworld-example-app/blob/master/conduit/user/views.py
from flask import Blueprint
blueprint = Blueprint('users', __name__, url_prefix='/users')
@blueprint.route("/", methods=('GET',))
def get_user_list():
return {"message": "Get List of Users."}
@blueprint.route("/", methods=('POST',))
def create_user():
return {"message": "Create a User."}
| 30.384615 | 94 | 0.701266 |
979c526ba5837032a402dbf4c59db3301a80e989
| 1,289 |
py
|
Python
|
AnaliseZipf/lerolero_extractor.py
|
Superar/PLN
|
2ed5ea1f0ebb28ca8dd3729c064758bc5c2abcee
|
[
"MIT"
] | null | null | null |
AnaliseZipf/lerolero_extractor.py
|
Superar/PLN
|
2ed5ea1f0ebb28ca8dd3729c064758bc5c2abcee
|
[
"MIT"
] | null | null | null |
AnaliseZipf/lerolero_extractor.py
|
Superar/PLN
|
2ed5ea1f0ebb28ca8dd3729c064758bc5c2abcee
|
[
"MIT"
] | null | null | null |
# from builtins import print
import time
import scrapy
from selenium import webdriver
def is_two_pages(fileobject):
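# Heuristic: seek to the end and treat more than 16,000 bytes of generated
# text as roughly two pages' worth of sentences.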
fileobject.seek(0, 2)
size = fileobject.tell()
if size > 16000:
return True
else:
return False
class TextSpider(scrapy.Spider):
name = "text_spider"
allowed_domains = ['lerolero.com/']
start_urls = [
'https://www.lerolero.com/']
def __init__(self):
self.driver = webdriver.Chrome('driver/chromedriver')
def parse(self, response):
self.driver.get(response.url)
file_name = str(int(round(time.time() * 1000)))
file = open('test_base_{}.txt'.format(file_name), 'w+')
file_name = file.name
while True:
gerar_frase = self.driver.find_element_by_id('gerar-frase')
try:
gerar_frase.click()
lerolero_sentence = self.driver.find_element_by_class_name('sentence').text
file.write(lerolero_sentence)
file.write(' ')
if is_two_pages(file):
file.close()
break
time.sleep(1)
except Exception:
file.close()
break
self.driver.close()
return file_name
| 21.483333 | 91 | 0.559348 |
e7608e1a8bd326ff094486601d1bccb84db52a27
| 5,192 |
py
|
Python
|
wz/ui/datatable_widget.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/datatable_widget.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/datatable_widget.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ui/datatable_widget.py
Last updated: 2021-10-22
Gui editor widget for "DataTables".
See datatable-editor.py for an app which can be used for testing this
widget.
=+LICENCE=================================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE=================================
"""
#TODO: Callback for modification of info items – separate from main table?
# Undo/redo for info items?
from PySide6.QtWidgets import QSizePolicy, QSplitter, \
QScrollArea, QWidget, QGridLayout, QLabel, QLineEdit
from PySide6.QtCore import Qt, QSize
from ui.editable import EdiTableWidget
from tables.spreadsheet import Spreadsheet, read_DataTable
### -----
class TextLine(QLineEdit):
def __init__(self, index, dataTableEditor):
self.index = index
self.dataTableEditor = dataTableEditor
super().__init__()
self.setContextMenuPolicy(Qt.NoContextMenu)
self.__text = ''
# self.textEdited.connect(self.newtext)
self.editingFinished.connect(self.newtext)
def set(self, text):
self.setText(text)
self.__text = text
def newtext(self):
text = self.text()
if text != self.__text:
self.__text = text
self.dataTableEditor.modified(True)
class InfoTable(QScrollArea):
def __init__(self):
super().__init__()
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(False)
self.setSizePolicy(sizePolicy)
self.setWidgetResizable(True)
def init(self, info, dataTableEditor):
contents = QWidget()
gridLayout = QGridLayout(contents)
self.info = []
r = 0
for key, val in info.items():
if key[0] != '_':
gridLayout.addWidget(QLabel(key), r, 0, 1, 1)
lineEdit = TextLine(0, dataTableEditor)
lineEdit.set(val)
gridLayout.addWidget(lineEdit, r, 1, 1, 1)
self.info.append([key, lineEdit])
r += 1
self.setWidget(contents)
def get_info(self):
return [(key, w.text()) for key, w in self.info]
class DataTableEditor(QSplitter):
def __init__(self):
super().__init__()
self.setOrientation(Qt.Vertical)
self.info = InfoTable()
self.addWidget(self.info)
self.table = EdiTableWidget()
sizePolicy1 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(1)
sizePolicy1.setHeightForWidth(False)
self.table.setSizePolicy(sizePolicy1)
self.addWidget(self.table)
def modified(self, mod):
"""Indicate data changed. Override this method in a subclass.
"""
#print(f"** MODIFIED: {mod} **")
pass
#TODO?
def reset_modified(self):
"""Reset the modified state of the data.
"""
self.table.reset_modified()
self.modified(False)
def open_table(self, datatable):
"""Read in a DataTable from the given path.
"""
#TODO: If it is done within another application, there might be translated headers
# (calling for <filter_DataTable(data, fieldlist, infolist, extend = True)>).
self.__info = datatable['__INFO__']
self.__columns = datatable['__FIELDS__']
self.__rows = datatable['__ROWS__']
self.info.init(self.__info, self)
data = []
for row in self.__rows:
rowdata = []
data.append(rowdata)
c = 0
for h in self.__columns:
rowdata.append(row[h])
c += 1
self.table.setup(colheaders = self.__columns,
undo_redo = True, row_add_del = True,
cut = True, paste = True,
on_changed = self.modified)
self.table.init_data(data)
self.table.resizeColumnsToContents()
def get_data(self):
"""Read the data from the widget. Return it as a "datatable".
"""
for key, val in self.info.get_info():
self.__info[key] = val
self.__rows = []
for row in self.table.table_data:
rowdata = {}
c = 0
for hdr in self.__columns:
rowdata[hdr] = row[c]
c += 1
self.__rows.append(rowdata)
return {
'__INFO__': self.__info,
'__FIELDS__': self.__columns,
'__ROWS__': self.__rows
}
| 32.049383 | 82 | 0.609977 |
6b675ae6ec20cdf575afb06a81fd2c5c2aa8bbb7
| 1,418 |
py
|
Python
|
tensorflow/basic-rl/tutorial14/code/train_deepqnaf.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
tensorflow/basic-rl/tutorial14/code/train_deepqnaf.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
tensorflow/basic-rl/tutorial14/code/train_deepqnaf.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
from baselines.deepqnaf.learn import learn
import gym
import tensorflow as tf
import argparse
#parser
parser = argparse.ArgumentParser()
parser.add_argument('--environment', dest='environment', type=str, default='MountainCarContinuous-v0')
parser.add_argument('--num_timesteps', dest='num_timesteps', type=int, default=100000)
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
args = parser.parse_args()
with tf.Session() as sess:
# create the environment
env = gym.make(str(args.environment))
# Check continuity of the environment
assert isinstance(env.observation_space, gym.spaces.Box), \
"observation space must be continuous"
assert isinstance(env.action_space, gym.spaces.Box), \
"action space must be continuous"
# Fix these two values and calculate episodes from the given timesteps
max_steps = 200
update_repeat = 5
max_episodes = args.num_timesteps // (max_steps*update_repeat)
learn (env, sess,
hidden_dims=[64,64],
# hidden_dims=[100,100],
use_batch_norm=True,
# learning_rate=0.001,
learning_rate=0.0001,
batch_size=100, # kind of like the size of the replay buffer
max_steps=max_steps,
update_repeat=update_repeat,
max_episodes=max_episodes,
outdir="/tmp/experiments/"+str(args.environment)+"/DEEPQNAF/")
| 36.358974 | 102 | 0.685472 |
d892995b60c5c42b2a079b10bef6d2f69d80e364
| 3,006 |
py
|
Python
|
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | 1 |
2021-01-23T21:42:10.000Z
|
2021-01-23T21:42:10.000Z
|
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-12-30 17:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menucard', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='alkoholfreiedrinks',
name='zusatzstoffe',
field=models.CharField(blank=True, max_length=55, null=True),
),
migrations.AddField(
model_name='alkoholhaltigedrinks',
name='zusatzstoffe',
field=models.CharField(blank=True, default='', max_length=55, null=True),
),
migrations.AddField(
model_name='hauptspeise',
name='zusatzstoffe',
field=models.CharField(blank=True, max_length=55, null=True),
),
migrations.AddField(
model_name='nachspeise',
name='zusatzstoffe',
field=models.CharField(blank=True, max_length=55, null=True),
),
migrations.AddField(
model_name='snacks',
name='zusatzstoffe',
field=models.CharField(blank=True, max_length=55, null=True),
),
migrations.AddField(
model_name='vorspeise',
name='zusatzstoffe',
field=models.CharField(blank=True, max_length=55, null=True),
),
migrations.AlterField(
model_name='alkoholfreiedrinks',
name='liter',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='alkoholfreiedrinks',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='alkoholhaltigedrinks',
name='centiliter',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='alkoholhaltigedrinks',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='hauptspeise',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='nachspeise',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='snacks',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
migrations.AlterField(
model_name='vorspeise',
name='preis',
field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
),
]
| 35.785714 | 96 | 0.581171 |
991287d9cb504a003155ca828430c40f5035c979
| 58 |
py
|
Python
|
febonaci.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
febonaci.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
febonaci.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | 1 |
2020-09-30T18:53:05.000Z
|
2020-09-30T18:53:05.000Z
|
a=0
b=1
while a<10:
print(a)
a,b=b,a+b
| 8.285714 | 14 | 0.396552 |
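For reference, the snippet above relies on Python's simultaneous tuple assignment to step the Fibonacci pair; a minimal annotated sketch of the same idiom and its output:
# Fibonacci numbers below 10, advanced with one tuple assignment.
a, b = 0, 1
while a < 10:
    print(a)          # prints 0, 1, 1, 2, 3, 5, 8 (one per line)
    a, b = b, a + b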
99201384e272dc16b1c63b79e4253d0df9af8dfd
| 15,870 |
py
|
Python
|
easyp2p/p2p_parser.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 4 |
2019-07-18T10:58:28.000Z
|
2021-11-18T16:57:45.000Z
|
easyp2p/p2p_parser.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 1 |
2019-07-05T09:21:47.000Z
|
2019-07-05T09:21:47.000Z
|
easyp2p/p2p_parser.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 2 |
2019-07-05T08:56:34.000Z
|
2020-06-09T10:03:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
"""
Module for parsing output files of P2P platforms and printing combined results.
Each P2P platform has a unique format for presenting investment results. The
purpose of this module is to provide parser methods to transform them into a
single output format.
"""
from datetime import date
import logging
from pathlib import Path
from typing import Mapping, Optional, Tuple
import numpy as np
import pandas as pd
from pandas.errors import ParserError
from PyQt5.QtCore import QCoreApplication
from easyp2p.p2p_signals import Signals
_translate = QCoreApplication.translate
logger = logging.getLogger('easyp2p.p2p_parser')
class P2PParser:
"""
Parser class to transform P2P account statements into easyp2p format.
Each P2P platform uses a unique format for their account statements. The
purpose of P2PParser is to provide parser methods for transforming those
files into a single unified easyp2p statement format.
"""
# Signals for communicating with the GUI
signals = Signals()
# Define all necessary payment types
BONUS_PAYMENT = _translate('P2PParser', 'Bonus payments')
BUYBACK_INTEREST_PAYMENT = _translate(
'P2PParser', 'Buyback interest payments')
BUYBACK_PAYMENT = _translate('P2PParser', 'Buybacks')
DEFAULTS = _translate('P2PParser', 'Defaults')
END_BALANCE_NAME = _translate('P2PParser', 'End balance')
IGNORE = 'Ignored'
INTEREST_PAYMENT = _translate('P2PParser', 'Interest payments')
INVESTMENT_PAYMENT = _translate('P2PParser', 'Investments')
IN_OUT_PAYMENT = _translate('P2PParser', 'Deposit/Outpayment')
LATE_FEE_PAYMENT = _translate('P2PParser', 'Late fee payments')
REDEMPTION_PAYMENT = _translate('P2PParser', 'Redemption payments')
START_BALANCE_NAME = _translate('P2PParser', 'Start balance')
TOTAL_INCOME = _translate('P2PParser', 'Total income')
# Define additional column names
CF_TYPE = 'Cash flow type'
CURRENCY = _translate('P2PParser', 'Currency')
DATE = _translate('P2PParser', 'Date')
MONTH = _translate('P2PParser', 'Month')
PLATFORM = _translate('P2PParser', 'Platform')
# TARGET_COLUMNS are the columns which will be shown in the final result
# file
TARGET_COLUMNS = [
START_BALANCE_NAME,
END_BALANCE_NAME,
IN_OUT_PAYMENT,
INVESTMENT_PAYMENT,
REDEMPTION_PAYMENT,
BUYBACK_PAYMENT,
INTEREST_PAYMENT,
BUYBACK_INTEREST_PAYMENT,
LATE_FEE_PAYMENT,
BONUS_PAYMENT,
DEFAULTS,
TOTAL_INCOME]
@signals.watch_errors
def __init__(
self, name: str, date_range: Tuple[date, date],
statement_file_name: str, header: int = 0,
skipfooter: int = 0, signals: Optional[Signals] = None) -> None:
"""
Constructor of P2PParser class.
Args:
name: Name of the P2P platform
date_range: Date range (start_date, end_date) for which the account
statement was generated
statement_file_name: File name including absolute path of the
downloaded account statement for this platform
header: Row number to use as column names and start of data in the
statement.
skipfooter: Rows to skip at the end of the statement.
signals: Signals instance for communicating with the calling class.
Raises:
RuntimeError: If the account statement could not be loaded from
statement file
"""
self.name = name
self.date_range = date_range
if signals:
self.signals.connect_signals(signals)
self.df = get_df_from_file(
statement_file_name, header=header, skipfooter=skipfooter)
self.logger = logging.getLogger('easyp2p.p2p_parser.P2PParser')
# Check if account statement exists
if self.df is None:
raise RuntimeError(_translate(
'P2PParser',
f'{self.name} parser: no account statement available!'))
self.logger.debug('Created P2PParser instance for %s.', self.name)
def _calculate_total_income(self):
""" Calculate total income for each row of the DataFrame """
self.logger.debug('%s: calculating total income.', self.name)
income_columns = [
self.INTEREST_PAYMENT,
self.LATE_FEE_PAYMENT,
self.BUYBACK_INTEREST_PAYMENT,
self.BONUS_PAYMENT,
self.DEFAULTS]
self.df[self.TOTAL_INCOME] = 0.
for col in [col for col in self.df.columns if col in income_columns]:
self.df[self.TOTAL_INCOME] += self.df[col]
self.logger.debug('%s: finished calculating total income.', self.name)
def _aggregate_results(
self, value_column: Optional[str],
balance_column: Optional[str]) -> None:
"""
Aggregate results in value_column by date and currency.
Args:
value_column: Name of the DataFrame column which contains the
data to be aggregated
balance_column: DataFrame column which contains the balances
"""
self.logger.debug(
'%s: start aggregating results in column %s.',
self.name, value_column)
orig_df = self.df
if value_column:
self.df = self.df.pivot_table(
values=value_column, index=[self.DATE, self.CURRENCY],
columns=[self.CF_TYPE], aggfunc=np.sum)
self.df.reset_index(inplace=True)
self.df.fillna(0, inplace=True)
# Start and end balance columns were summed up as well if they were
# present. That's obviously not correct, so we will look up the correct
# values in the original DataFrame and overwrite the sums.
if balance_column:
# The start balance value of each day already includes the first
# daily cash flow which needs to be subtracted again
self.df[self.START_BALANCE_NAME] = (
orig_df.groupby([self.DATE, self.CURRENCY]).first()[
balance_column]
- orig_df.groupby(self.DATE).first()[
value_column]).reset_index()[0]
self.df[self.END_BALANCE_NAME] = \
orig_df.groupby([self.DATE, self.CURRENCY]).last()[
balance_column].reset_index()[balance_column]
self.logger.debug('%s: finished aggregating results.', self.name)
def _filter_date_range(self, date_format: str) -> None:
"""
Only keep dates in self.date_range in DataFrame self.df.
Args:
date_format: Date format which the platform uses
"""
self.logger.debug('%s: filter date range.', self.name)
start_date = pd.Timestamp(self.date_range[0])
end_date = pd.Timestamp(self.date_range[1]).replace(
hour=23, minute=59, second=59)
self.df[self.DATE] = pd.to_datetime(
self.df[self.DATE], format=date_format)
self.df = self.df[
(self.df[self.DATE] >= start_date)
& (self.df[self.DATE] <= end_date)]
# Convert date column from datetime to date:
self.df[self.DATE] = self.df[self.DATE].dt.date
self.logger.debug('%s: filter date range finished.', self.name)
def _map_cashflow_types(
self, cashflow_types: Optional[Mapping[str, str]],
orig_cf_column: Optional[str]) -> Tuple[str, ...]:
"""
Map platform cashflow types to easyp2p cashflow types.
Args:
cashflow_types: Dictionary containing a mapping between platform
and easyp2p cash flow types
orig_cf_column: Name of the column in the platform account
statement which contains the cash flow type
Returns:
Sorted tuple of strings with all unknown cash flow types or an
empty tuple if no unknown cash flow types were found.
"""
if cashflow_types is None:
self.logger.debug('%s: no cash flow types to map.', self.name)
return ()
self.logger.debug(
'%s: mapping cash flow types %s contained in column %s.',
self.name, str(cashflow_types.keys()), orig_cf_column)
self.df[orig_cf_column] = self.df[orig_cf_column].str.strip()
self.df[self.CF_TYPE] = self.df[orig_cf_column].map(cashflow_types)
# All unknown cash flow types will be NaN
unknown_cf_types = self.df[orig_cf_column].where(
self.df[self.CF_TYPE].isna()).dropna().tolist()
# Remove duplicates, sort the entries and make them immutable
unknown_cf_types = tuple(sorted(set(unknown_cf_types)))
self.logger.debug('%s: mapping successful.', self.name)
return unknown_cf_types
def _add_zero_line(self):
"""Add a single zero cash flow for start date to the DataFrame."""
self.logger.debug('%s: adding zero cash flow.', self.name)
data = [
(self.name, 'EUR', self.date_range[0],
*[0.] * len(self.TARGET_COLUMNS))]
columns = [
self.PLATFORM, self.CURRENCY, self.DATE,
*self.TARGET_COLUMNS]
self.df = pd.DataFrame(data=data, columns=columns)
self.df.set_index(
[self.PLATFORM, self.CURRENCY, self.DATE], inplace=True)
self.logger.debug('%s: added zero cash flow.', self.name)
@signals.watch_errors
def _check_investment_col(self, value_column: str) -> None:
"""
Make sure outgoing investments have a negative sign.
Args:
value_column: Column name of investment amounts.
"""
self.check_columns(value_column)
investment_col = self.df.loc[
self.df[self.CF_TYPE] == self.INVESTMENT_PAYMENT, value_column]
if investment_col.min() > 0.:
investment_col *= -1
self.df.loc[
self.df[self.CF_TYPE] == self.INVESTMENT_PAYMENT, value_column] \
= investment_col
@signals.watch_errors
def check_columns(self, *columns) -> None:
"""
Check if column names exist in the data frame.
Args:
*columns: Names of the columns which should be present in the data
frame.
Raises:
RuntimeError: if at least one column is not present.
"""
missing = [col for col in columns if col not in self.df.columns]
if len(missing) > 0:
raise RuntimeError(_translate(
'P2PParser',
f'{self.name}: columns {missing} missing in account '
'statement!'))
@signals.update_progress
def parse(
self, date_format: str = None,
rename_columns: Mapping[str, str] = None,
cashflow_types: Optional[Mapping[str, str]] = None,
orig_cf_column: Optional[str] = None,
value_column: Optional[str] = None,
balance_column: Optional[str] = None) -> Tuple[str, ...]:
"""
Parse the account statement from platform format to easyp2p format.
Keyword Args:
date_format: Date format which the platform uses
rename_columns: Dictionary containing a mapping between platform
and easyp2p column names
cashflow_types: Dictionary containing a mapping between platform
and easyp2p cash flow types
orig_cf_column: Name of the column in the platform account
statement which contains the cash flow type
value_column: Name of the DataFrame column which contains the
amounts to be aggregated
balance_column: Name of the column which contains the portfolio
balances
Returns:
Sorted tuple of all unknown cash flow types as strings.
Raises:
RuntimeError: If date or cash flow columns cannot be found in
DataFrame
"""
self.logger.debug('%s: starting parser.', self.name)
# If there were no cash flows in date_range add a single zero line
if self.df.empty:
self._add_zero_line()
return ()
# Rename columns in DataFrame
if rename_columns:
self.check_columns(*rename_columns.keys())
self.df.rename(columns=rename_columns, inplace=True)
# Make sure we only show results between start and end date
if date_format:
self.check_columns(self.DATE)
self._filter_date_range(date_format)
if self.df.empty:
self._add_zero_line()
return ()
if cashflow_types:
self.check_columns(orig_cf_column)
unknown_cf_types = self._map_cashflow_types(
cashflow_types, orig_cf_column)
else:
unknown_cf_types = ()
# If the platform does not explicitly report currencies assume that
# currency is EUR
if self.CURRENCY not in self.df.columns:
self.df[self.CURRENCY] = 'EUR'
# Ensure that investment cash flows have a negative sign
if value_column:
self._check_investment_col(value_column)
# Sum up the results per date and currency
self._aggregate_results(value_column, balance_column)
# Add total income column
self._calculate_total_income()
# Set the index
self.df[self.PLATFORM] = self.name
self.df.set_index(
[self.PLATFORM, self.CURRENCY, self.DATE], inplace=True)
# Sort and drop all unnecessary columns
self.df = self.df[[
col for col in self.TARGET_COLUMNS if col in self.df.columns]]
# Round all values to 4 digits
self.df = self.df.round(4)
# Disconnect signals
self.signals.disconnect_signals()
self.logger.debug('%s: parser completed successfully.', self.name)
return unknown_cf_types
def get_df_from_file(
input_file: str, header: int = 0, skipfooter: int = 0) -> pd.DataFrame:
"""
Read a pandas.DataFrame from input_file.
Args:
input_file: File name including path.
header: Row number to use as column names and start of data.
skipfooter: Rows to skip at the end of the statement.
Returns:
pandas.DataFrame: DataFrame which was read from the file.
Raises:
        RuntimeError: If input_file does not exist, cannot be read or if the \
            file format is not one of csv, xls(x) or json.
"""
file_format = Path(input_file).suffix
try:
if file_format == '.csv':
if skipfooter:
# The default 'c' engine does not support skipfooter
df = pd.read_csv(
input_file, header=header, skipfooter=skipfooter,
engine='python')
else:
df = pd.read_csv(input_file, header=header)
elif file_format in ('.xlsx', '.xls'):
df = pd.read_excel(input_file, header=header, skipfooter=skipfooter)
elif file_format == '.json':
df = pd.read_json(input_file)
else:
raise RuntimeError(_translate(
'P2PParser',
f'Unknown file format during import: {input_file}'))
except FileNotFoundError:
logger.exception('File not found.')
raise RuntimeError(_translate(
'P2PParser', f'{input_file} could not be found!'))
except ParserError:
msg = f'{input_file} could not be parsed!'
logger.exception(msg)
raise RuntimeError(_translate('P2PParser', msg))
return df
| 37.429245 | 80 | 0.622684 |
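A minimal usage sketch for the parser above; the platform name, statement file and column/cash-flow mappings here are hypothetical, not taken from easyp2p:
from datetime import date
# Hypothetical statement with columns 'Date', 'Type' and 'Amount'.
parser = P2PParser(
    'DemoPlatform', (date(2020, 1, 1), date(2020, 1, 31)),
    'demo_statement.csv')
unknown = parser.parse(
    date_format='%Y-%m-%d',
    rename_columns={'Date': parser.DATE},
    cashflow_types={'Interest': parser.INTEREST_PAYMENT},
    orig_cf_column='Type',
    value_column='Amount')
print(unknown)  # cash flow types the mapping did not cover, if any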
51a7b49f979b6c5a0c6020e4d8c3a5c997029df6
| 15,831 |
py
|
Python
|
pyScript/custom_src/MainWindow.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
pyScript/custom_src/MainWindow.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
pyScript/custom_src/MainWindow.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
import os, sys
# QT
from PySide2.QtWidgets import QMainWindow, QFileDialog, QShortcut
from PySide2.QtGui import QColor, QFontDatabase, QIcon, QKeySequence
# parent UI
from ui.ui_main_window import Ui_MainWindow
from custom_src.GlobalAccess import GlobalStorage
# custom content
from custom_src.Script import Script
# from custom_src.VyVariable import VyVariable
from custom_src.Node import Node, NodePort, SetVariable_Node, GetVariable_Node
from custom_src.custom_nodes.GetVar_NodeInstance import GetVar_NodeInstance
from custom_src.custom_nodes.SetVar_NodeInstance import SetVar_NodeInstance
from custom_src.ScriptsListWidget import ScriptsListWidget
class MainWindow(QMainWindow):
def __init__(self, config):
super(MainWindow, self).__init__()
QFontDatabase.addApplicationFont('fonts/poppins/Poppins-Medium.ttf')
QFontDatabase.addApplicationFont('fonts/source code pro/SourceCodePro-Regular.ttf')
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.splitter.setSizes([120, 800])
self.setWindowTitle('pyScript')
self.setWindowIcon(QIcon('stuff/pics/program_icon.png'))
self.load_stylesheet('dark')
self.ui.scripts_tab_widget.removeTab(0)
self.ui.actionImport_Nodes.triggered.connect(self.on_import_nodes_triggered)
self.ui.actionSave_Project.triggered.connect(self.on_save_project_triggered)
self.ui.actionDesignDark_Std.triggered.connect(self.on_dark_std_design_triggered)
self.ui.actionDesignDark_Tron.triggered.connect(self.on_dark_tron_design_triggered)
self.ui.actionEnableDebugging.triggered.connect(self.on_enable_debugging_triggered)
self.ui.actionDisableDebugging.triggered.connect(self.on_disable_debugging_triggered)
self.ui.actionSave_Pic_Viewport.triggered.connect(self.on_save_scene_pic_viewport_triggered)
self.ui.actionSave_Pic_Whole_Scene_scaled.triggered.connect(self.on_save_scene_pic_whole_triggered)
# Shortcuts
save_shortcut = QShortcut(QKeySequence.Save, self)
save_shortcut.activated.connect(self.on_save_project_triggered)
self.custom_nodes = []
self.all_nodes = [SetVariable_Node(), GetVariable_Node()]
# holds NI subCLASSES for imported nodes:
self.all_node_instance_classes = {
self.all_nodes[0]: SetVar_NodeInstance,
self.all_nodes[1]: GetVar_NodeInstance
} # (key: node obj, val: NI subclass) (used in Flow)
self.custom_node_input_widget_classes = {} # {node : {str: PortInstanceWidget-subclass}} (used in PortInstance)
# self.node_images = {
# self.all_nodes[0]: self.get_node_image(self.all_nodes[0]),
# self.all_nodes[1]: self.get_node_image(self.all_nodes[1])
# } # {node: QImage}
if not os.path.exists('temp'):
os.mkdir('temp')
# clear temp folder
for f in os.listdir('temp'):
os.remove('temp/'+f)
self.scripts = []
self.scripts_list_widget = ScriptsListWidget(self, self.scripts)
self.ui.scripts_scrollArea.setWidget(self.scripts_list_widget)
self.ui.add_new_script_pushButton.clicked.connect(self.create_new_script_button_pressed)
self.ui.new_script_name_lineEdit.returnPressed.connect(self.create_new_script_le_return_pressed)
self.design_style = 'dark std'
if config['config'] == 'create plain new project':
self.try_to_create_new_script()
elif config['config'] == 'open project':
self.import_required_packages(config['required packages'])
self.parse_project(config['content'])
self.resize(1500, 800)
    def load_stylesheet(self, ss):
        ss_content = ''
        try:
            f = open('stuff/stylesheets/'+ss+'.txt')
            ss_content = f.read()
            f.close()
        except OSError:
            # Fall back to an empty stylesheet if the file cannot be read.
            pass
        finally:
            self.setStyleSheet(ss_content)
def on_dark_std_design_triggered(self):
self.set_design('dark std')
def on_dark_tron_design_triggered(self):
self.set_design('dark tron')
def set_design(self, new_design):
GlobalStorage.storage['design style'] = new_design
self.design_style = new_design
for script in self.scripts:
script.flow.design_style_changed()
def on_enable_debugging_triggered(self):
GlobalStorage.storage['debugging'] = True
def on_disable_debugging_triggered(self):
GlobalStorage.storage['debugging'] = False
def on_save_scene_pic_viewport_triggered(self):
if len(self.scripts) == 0:
return
file_path = QFileDialog.getSaveFileName(self, 'select file', '', 'PNG(*.png)')[0]
img = self.scripts[self.ui.scripts_tab_widget.currentIndex()].flow.get_viewport_img()
img.save(file_path)
def on_save_scene_pic_whole_triggered(self):
if len(self.scripts) == 0:
return
file_path = QFileDialog.getSaveFileName(self, 'select file', '', 'PNG(*.png)')[0]
img = self.scripts[self.ui.scripts_tab_widget.currentIndex()].flow.get_whole_scene_img()
img.save(file_path)
def create_new_script_button_pressed(self):
self.try_to_create_new_script(name=self.ui.new_script_name_lineEdit.text())
def create_new_script_le_return_pressed(self):
self.try_to_create_new_script(name=self.ui.new_script_name_lineEdit.text())
def try_to_create_new_script(self, name='fancy script', config=None):
if len(name) == 0:
return
for s in self.scripts:
if s.name == name:
return
new_script = Script(self, name, config)
new_script.name_changed.connect(self.rename_script)
self.ui.scripts_tab_widget.addTab(new_script.widget, new_script.name)
self.scripts.append(new_script)
self.scripts_list_widget.recreate_ui()
def rename_script(self, script, new_name):
self.ui.scripts_tab_widget.setTabText(self.scripts.index(script), new_name)
script.name = new_name
def delete_script(self, script):
index = self.scripts.index(script)
self.ui.scripts_tab_widget.removeTab(index)
del self.scripts[index]
def on_import_nodes_triggered(self):
file_path = QFileDialog.getOpenFileName(self, 'select nodes file', '../packages', 'PyScript Packages(*.pypac)',)[0]
if file_path != '':
self.import_nodes_package_from_file(file_path)
def import_required_packages(self, packages_list):
for p in packages_list:
self.import_nodes_package_from_file(p)
def import_nodes_package_from_file(self, file_path):
j_str = ''
try:
f = open(file_path)
j_str = f.read()
f.close()
        except (FileExistsError, FileNotFoundError):
GlobalStorage.debug('couldn\'t open file')
return
# IMPORTANT: FIRST, TRANSLATE THE PACKAGE (METACODE files -> SRC CODE files)
PackageTranslator = self.get_class_from_file(file_path='../pyScript_PackageTranslator',
file_name='pyScript_PackageTranslator',
class_name='PackageTranslator')
package_translator = PackageTranslator(os.path.dirname(os.path.abspath(file_path)))
# self.parse_nodes(j_str, os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0])
self.parse_nodes(j_str, os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0])
def parse_nodes(self, j_str, package_path, package_name):
import json
        # strict=False has to be set to allow 'control characters' like '\n' for newline when loading the json
j_obj = json.loads(j_str, strict=False)
GlobalStorage.debug(j_obj['type'])
if j_obj['type'] != 'vyScriptFP nodes package':
return
# package_title = j_obj['title']
# package_description = j_obj['description']
j_nodes_list = j_obj['nodes']
num_nodes = len(j_nodes_list)
for ni in range(num_nodes): # new node
j_node = j_nodes_list[ni]
new_node = Node()
node_title = j_node['title']
node_class_name = j_node['class name']
node_description = j_node['description']
node_type = j_node['type']
node_has_main_widget = j_node['has main widget']
node_main_widget_pos = j_node['widget position'] if node_has_main_widget else None
node_design_style = j_node['design style']
node_color = j_node['color']
            # every node has a custom module name which differs from its name to prevent import issues when using
# multiple (different) Nodes with same titles
# FOR FURTHER EXPLANATION: see node manager
node_module_name = j_node['module name']
module_name_separator = '___'
# CUSTOM CLASS IMPORTS ----------------------------------------------------------------------------
# creating all the necessary path variables here for all potentially imported classes
# IMPORT NODE INSTANCE SUBCLASS
node_instance_class_file_path = package_path+'/nodes/'+node_module_name+'/'
node_instance_widgets_file_path = node_instance_class_file_path+'/widgets'
node_instance_filename = node_module_name # the NI file's name is just the 'module name'
new_node_instance_class = self.get_class_from_file(file_path=node_instance_class_file_path,
file_name=node_instance_filename,
class_name=node_class_name+'_NodeInstance')
self.all_node_instance_classes[new_node] = new_node_instance_class
# IMPORT MAIN WIDGET
if node_has_main_widget:
main_widget_filename = node_module_name+module_name_separator+'main_widget'
new_node.main_widget_class = self.get_class_from_file(file_path=node_instance_widgets_file_path,
file_name=main_widget_filename,
class_name=node_class_name+'_NodeInstance_MainWidget')
# I need to create the dict for the node's potential custom input widgets already here
self.custom_node_input_widget_classes[new_node] = {}
for w_name in j_node['custom input widgets']:
input_widget_filename = node_module_name+module_name_separator+w_name
custom_widget_class = self.get_class_from_file(file_path=node_instance_widgets_file_path,
file_name=input_widget_filename,
class_name=w_name+'_PortInstanceWidget')
self.custom_node_input_widget_classes[new_node][w_name] = custom_widget_class
# note: the input widget classes get imported below in the loop
# ---------------------------------------------------------------------------------------------------
j_n_inputs = j_node['inputs']
inputs = []
num_inputs = len(j_n_inputs)
for ii in range(num_inputs):
j_input = j_n_inputs[ii]
i_type = j_input['type']
i_label = j_input['label']
i_has_widget = None
i_widget_type = ''
i_widget_name = ''
i_widget_pos = None
if i_type == 'data':
i_has_widget = j_input['has widget']
if i_has_widget:
i_widget_type = j_input['widget type']
i_widget_pos = j_input['widget position']
if i_widget_type == 'custom widget':
i_widget_name = j_input['widget name']
new_input = NodePort()
new_input.type = i_type
new_input.label = i_label
if i_has_widget:
new_input.widget_type = i_widget_type
new_input.widget_name = i_widget_name
if i_widget_pos:
new_input.widget_pos = i_widget_pos
else:
new_input.widget_type = 'None'
inputs.append(new_input)
j_n_outputs = j_node['outputs']
outputs = []
num_outputs = len(j_n_outputs)
for oi in range(num_outputs):
j_output = j_n_outputs[oi]
o_type = j_output['type']
o_label = j_output['label']
new_output = NodePort()
new_output.type = o_type
new_output.label = o_label
outputs.append(new_output)
new_node.title = node_title
new_node.description = node_description
new_node.type = node_type
new_node.package = package_name
new_node.has_main_widget = node_has_main_widget
if node_has_main_widget:
new_node.main_widget_pos = node_main_widget_pos
new_node.design_style = node_design_style
new_node.color = QColor(node_color)
# new_node.code = node_code
new_node.inputs = inputs
new_node.outputs = outputs
# self.node_images[new_node] = self.get_node_image(new_node)
self.custom_nodes.append(new_node)
self.all_nodes.append(new_node)
GlobalStorage.debug(len(self.custom_nodes), 'nodes imported')
# def get_node_image(self, node):
# # create picture
# render_view = RenderView(self, node, self.all_node_instance_classes[node])
# img = render_view.get_img()
# del render_view
# return img
def get_class_from_file(self, file_path, file_name, class_name):
GlobalStorage.debug(file_path)
GlobalStorage.debug(file_name)
GlobalStorage.debug(class_name)
sys.path.append(file_path)
new_module = __import__(file_name, fromlist=[class_name])
new_class = getattr(new_module, class_name)
return new_class
def parse_project(self, j_obj):
if j_obj['general info']['type'] != 'pyScriptFP project file':
return
for s in j_obj['scripts']: # fill flows
self.try_to_create_new_script(config=s)
def on_save_project_triggered(self):
file_name = ''
file_name = QFileDialog.getSaveFileName(self, 'select location and give file name',
'../saves', 'PyScript Project(*.pypro)')[0]
if file_name != '':
self.save_project(file_name)
def save_project(self, file_name):
import json
file = None
try:
file = open(file_name, 'w')
except FileNotFoundError:
GlobalStorage.debug('couldn\'t open file')
return
general_project_info_dict = {'type': 'pyScriptFP project file'}
scripts_data = []
for script in self.scripts:
scripts_data.append(script.get_json_data())
whole_project_dict = {'general info': general_project_info_dict,
'scripts': scripts_data}
json_str = json.dumps(whole_project_dict)
GlobalStorage.debug(json_str)
file.write(json_str)
file.close()
| 39.676692 | 124 | 0.619481 |
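The dynamic class loading in get_class_from_file above is the standard sys.path / __import__ / getattr pattern; a self-contained sketch (directory, module and class names are made up for illustration):
import sys
def load_class(directory, module_name, class_name):
    # Make the directory importable, import the module, pull out the class.
    sys.path.append(directory)
    module = __import__(module_name, fromlist=[class_name])
    return getattr(module, class_name)
# e.g. load_class('plugins', 'my_node', 'MyNode') would import
# plugins/my_node.py and return its MyNode class.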
cfca473c7636af7a265aadb872e696e60bca57e4
| 213 |
py
|
Python
|
2021/RoundC/tmp.py
|
Akash671/KickStart
|
7cf7e572408203c881d56989fb37e6270bd696f0
|
[
"CC0-1.0"
] | 1 |
2021-03-12T08:39:01.000Z
|
2021-03-12T08:39:01.000Z
|
2021/RoundC/tmp.py
|
Akash671/KickStart
|
7cf7e572408203c881d56989fb37e6270bd696f0
|
[
"CC0-1.0"
] | null | null | null |
2021/RoundC/tmp.py
|
Akash671/KickStart
|
7cf7e572408203c881d56989fb37e6270bd696f0
|
[
"CC0-1.0"
] | 1 |
2021-03-20T18:55:52.000Z
|
2021-03-20T18:55:52.000Z
|
# Python program showing no need to
# use global keyword for accessing
# a global value
# global variable
a = 15
b = 10
# function to perform addition
def add():
c = a + b
print(c)
# calling a function
add()
| 13.3125 | 35 | 0.690141 |
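The flip side of the snippet above: the global keyword becomes necessary as soon as a function assigns to the module-level name, as in this minimal sketch:
counter = 0
def increment():
    global counter   # required because we rebind the module-level name
    counter += 1     # without the declaration this raises UnboundLocalError
increment()
print(counter)  # -> 1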
8ff5cab7fb4ce3e6dd3d1a109f20f0119467486f
| 471 |
py
|
Python
|
pacman-termux/test/pacman/tests/smoke002.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/smoke002.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/smoke002.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Install packages with huge descriptions"
p1 = pmpkg("pkg1")
p1.desc = 'A' * 500 * 1024
self.addpkg(p1)
p2 = pmpkg("pkg2")
p2.desc = 'A' * 600 * 1024
self.addpkg(p2)
self.args = "-U %s %s" % (p1.filename(), p2.filename())
# We error out when fed a package with an invalid description; the second one
# fits the bill in this case as the desc is > 512K
self.addrule("PACMAN_RETCODE=1")
self.addrule("!PKG_EXIST=pkg1")
self.addrule("!PKG_EXIST=pkg1")
| 26.166667 | 77 | 0.692144 |
712eefca56c390466eeff675972b4137318849fa
| 4,390 |
py
|
Python
|
kollektiv5gui/views/DatasetTableWidget.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 3 |
2019-03-21T17:02:55.000Z
|
2019-04-04T18:16:10.000Z
|
kollektiv5gui/views/DatasetTableWidget.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 11 |
2019-10-30T12:05:39.000Z
|
2022-03-11T23:43:54.000Z
|
kollektiv5gui/views/DatasetTableWidget.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 1 |
2019-10-30T12:04:00.000Z
|
2019-10-30T12:04:00.000Z
|
import json
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap, QCursor, QColor
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu
from kollektiv5gui.util import api
from kollektiv5gui.models.Dataset import Dataset
class DatasetTableWidget(QTableWidget):
"""
    This table contains all classes of a dataset. It displays a
preview image, the class ids and a textual description of the classes.
"""
def __init__(self, mainWindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mainWindow = mainWindow
self.verticalHeader().setVisible(False)
self.horizontalHeader().setStretchLastSection(True)
self.setColumnCount(3)
self.setColumnWidth(0, 48)
self.setColumnWidth(1, 64)
self.setColumnWidth(2, 256)
self.setHorizontalHeaderLabels([
'ClassID',
'Preview',
'Textual Name',
])
self.__dataset = mainWindow.getDataset()
self.renderDataset()
self.itemSelectionChanged.connect(self.onItemSelectionChanged)
def contextMenuEvent(self, event):
"""
Right click anywhere on the table.
We only care about the current row however,
as this defines the used class id.
"""
tableMenu = QMenu()
sendSampleAction = tableMenu.addAction('Send Sample to API')
sendSampleAction.triggered.connect(self.classifySelected)
tableMenu.exec_(QCursor.pos())
def onItemSelectionChanged(self):
"""
Handler function called by Qt whenever the user changes the selected
items
"""
self.mainWindow.generateButtonSelected.setEnabled(
len(self.selectedItems()) > 0
)
def getSelectedClasses(self):
"""
Returns a list of all selected class ids.
"""
indexes = self.selectionModel().selection().indexes()
if indexes:
rows = set()
classes = []
for i in indexes:
rows.add(i.row())
for row in rows:
classes.append(self.__dataset.getClasses()[row])
return classes
return []
def classifySelected(self):
"""
Sends the preview images of all selected classes to the API and prints
the classification result.
"""
classes = self.getSelectedClasses()
for c in classes:
res = api.classifyFile(c.thumbnailPath)
self.mainWindow.log(json.dumps(res))
def renderDataset(self):
"""
        Render the classes of the loaded dataset: class id, preview image
        and textual name for each entry.
"""
self.setRowCount(self.__dataset.getClassesCount())
i = 0
unknownClassBackground = QColor(250, 120, 100)
for c in self.__dataset.getClasses():
classId = QTableWidgetItem(str(c.id))
# disable editing of the class id
# this is done for all cells within this row (and for every row)
classId.setFlags(classId.flags() ^ Qt.ItemIsEditable)
preview = QTableWidgetItem()
# c.thumbnailPath contains a filename, passing this value to the
# constructor of QPixmap automatically loads an image
preview.setData(Qt.DecorationRole, QPixmap(c.thumbnailPath))
preview.setFlags(preview.flags() ^ Qt.ItemIsEditable)
name = QTableWidgetItem(c.name)
name.setFlags(name.flags() ^ Qt.ItemIsEditable)
if not c.known:
classId.setBackground(unknownClassBackground)
preview.setBackground(unknownClassBackground)
name.setBackground(unknownClassBackground)
self.setItem(i, 0, classId)
self.setItem(i, 1, preview)
self.setItem(i, 2, name)
# make sure the row is as large as the image within
# (we just assume a height of 64 pixels)
self.setRowHeight(i, 64)
i += 1
def tableClick(self, x, y):
"""
Handler function called by Qt whenever the user clicks on an entry in
the table.
Normally it should highlight the complete row, not just the clicked
cell.
This does not seem to work reliably however...
"""
self.selectRow(x)
| 35.12 | 78 | 0.615262 |
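A side note on the flags() ^ Qt.ItemIsEditable idiom in renderDataset above: XOR toggles the editable bit, which only clears it because Qt items start out editable. A sketch of the more explicit mask-clearing form (same PyQt5 API, hypothetical item):
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTableWidgetItem
item = QTableWidgetItem('read-only cell')
# Clear the bit unconditionally instead of toggling it.
item.setFlags(item.flags() & ~Qt.ItemIsEditable)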
a4489dffe718a97ccb74e3c9f52f9b17c3e92dd1
| 318 |
py
|
Python
|
src/onegov/onboarding/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/onboarding/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/onboarding/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger('onegov.onboarding') # noqa
log.addHandler(logging.NullHandler()) # noqa
from translationstring import TranslationStringFactory
_ = TranslationStringFactory('onegov.onboarding') # noqa
from onegov.onboarding.app import OnboardingApp
__all__ = ['_', 'log', 'OnboardingApp']
| 28.909091 | 57 | 0.783019 |
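The TranslationStringFactory above stamps every marked string with the 'onegov.onboarding' translation domain; a minimal sketch of what such a factory returns (domain and message are made up):
from translationstring import TranslationStringFactory
_ = TranslationStringFactory('demo.domain')
ts = _('Hello ${name}', mapping={'name': 'world'})
print(ts.domain)         # -> 'demo.domain'
print(ts.interpolate())  # -> 'Hello world'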
a44b22a980e8ffa2fe3644e6346b6c4921cf8ee2
| 73 |
py
|
Python
|
src/showcase/__init__.py
|
th-koeln-intia/ip-sprachassistent-team1
|
69fbc06a326da91fd3d84f222eba6cd2b1a79975
|
[
"MIT"
] | 1 |
2021-04-28T09:45:34.000Z
|
2021-04-28T09:45:34.000Z
|
src/showcase/__init__.py
|
th-koeln-intia/ip-sprachassistent-team1
|
69fbc06a326da91fd3d84f222eba6cd2b1a79975
|
[
"MIT"
] | 1 |
2020-09-24T07:20:16.000Z
|
2020-09-24T07:20:16.000Z
|
src/showcase/__init__.py
|
th-koeln-intia/ip-sprachassistent-team1
|
69fbc06a326da91fd3d84f222eba6cd2b1a79975
|
[
"MIT"
] | 1 |
2020-12-04T13:38:33.000Z
|
2020-12-04T13:38:33.000Z
|
from showcase import showcase
def setup():
return showcase.setup()
| 12.166667 | 29 | 0.726027 |
a4a0c0f4e8de3d2fc701771f2ec4f0812f3c362a
| 359 |
py
|
Python
|
exercises/ja/solution_03_09_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/ja/solution_03_09_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/ja/solution_03_09_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
from spacy.tokens import Token
nlp = spacy.blank("ja")
# Add the extension attribute "is_country" to tokens, with a default of False
Token.set_extension("is_country", default=False)
# Process the text and set is_country to True for the "スペイン" (Spain) token
doc = nlp("私はスペインに住んでいます。")
doc[2]._.is_country = True
# Print the text and the is_country attribute for every token
print([(token.text, token._.is_country) for token in doc])
| 23.933333 | 58 | 0.785515 |
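Besides the default-value extension used above, spaCy token extensions can also be computed on access via a getter; a small sketch (the attribute name is invented for illustration):
import spacy
from spacy.tokens import Token
nlp = spacy.blank("en")
# Computed attribute: evaluated on access instead of stored per token.
Token.set_extension("char_count", getter=lambda token: len(token.text))
doc = nlp("Hello world")
print([(t.text, t._.char_count) for t in doc])  # [('Hello', 5), ('world', 5)]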
74b00c2b6dfd171d2e4c14614c92073db83d7af2
| 419 |
py
|
Python
|
pacman-termux/test/pacman/tests/epoch004.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/epoch004.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/epoch004.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Sysupgrade with same version, different epochs"
sp = pmpkg("dummy", "2:2.0-1")
sp.files = ["bin/dummynew"]
self.addpkg2db("sync", sp)
lp = pmpkg("dummy", "1:2.0-1")
lp.files = ["bin/dummyold"]
self.addpkg2db("local", lp)
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=dummy|2:2.0-1")
self.addrule("FILE_EXIST=bin/dummynew")
self.addrule("!FILE_EXIST=bin/dummyold")
| 24.647059 | 67 | 0.699284 |
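The test above hinges on pacman's version ordering, where the epoch dominates: 2:2.0-1 is newer than 1:2.0-1 even though pkgver and pkgrel match. A rough sketch of that precedence with plain tuples (not pacman's actual vercmp implementation):
def split_evr(evr):
    # 'epoch:version-release' -> (epoch, version, release); epoch defaults to 0.
    epoch, _, rest = evr.rpartition(':')
    version, _, release = rest.partition('-')
    return (int(epoch or 0), version, release)
print(split_evr('2:2.0-1') > split_evr('1:2.0-1'))  # -> True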
2d104ecbf49f42703884e2800e25b87173a1113e
| 472 |
py
|
Python
|
checker/responsivesecurity/product_key.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
checker/responsivesecurity/product_key.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
checker/responsivesecurity/product_key.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
import urllib.request
import ssl
import nacl.encoding
import nacl.public
from .resources import PRIVATE_KEY
def get_url(url):
return urllib.request.urlopen(url, context=ssl._create_unverified_context()).read()
def get_from_api(endpoint):
sk = nacl.public.PrivateKey(PRIVATE_KEY, nacl.encoding.HexEncoder)
box = nacl.public.SealedBox(sk)
enc = get_url(endpoint+"/get_product_key")
return box.decrypt(enc, encoder=nacl.encoding.HexEncoder).decode()
| 27.764706 | 87 | 0.769068 |
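For context, the SealedBox decryption above has a matching encryption side; a self-contained PyNaCl round trip with fresh keys and made-up plaintext (not the checker's real key material):
import nacl.encoding
import nacl.public
sk = nacl.public.PrivateKey.generate()
# Anyone holding the public key can seal; only the private key can open.
sealed = nacl.public.SealedBox(sk.public_key).encrypt(
    b'product-key-123', encoder=nacl.encoding.HexEncoder)
plain = nacl.public.SealedBox(sk).decrypt(
    sealed, encoder=nacl.encoding.HexEncoder)
print(plain)  # -> b'product-key-123'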
743b6debf39ce598a411de981da701a3a8d87d62
| 29,795 |
py
|
Python
|
Openharmony v1.0/third_party/ltp/testcases/kernel/power_management/lib/pm_sched_mc.py
|
clkbit123/TheOpenHarmony
|
0e6bcd9dee9f1a2481d762966b8bbd24baad6159
|
[
"MIT"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/ltp/testcases/kernel/power_management/lib/pm_sched_mc.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/third_party/ltp/testcases/kernel/power_management/lib/pm_sched_mc.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
''' Reusable helper functions for the sched_mc FVT tests are collected here.
'''
import os
import sys
import re
from time import time
__author__ = "Vaidyanathan Srinivasan <[email protected]>"
__author__ = "Poornima Nayak <[email protected]>"
cpu_map = {}
stats_start = {}
stats_stop = {}
stats_percentage = {}
intr_start = []
intr_stop = []
cpu_count = 0
socket_count = 0
cpu1_max_intr = 0
cpu2_max_intr = 0
intr_stat_timer_0 = []
siblings_list = []
def clear_dmesg():
'''
Clears dmesg
'''
try:
os.system('dmesg -c >/dev/null')
except OSError as e:
print('Clearing dmesg failed', e)
sys.exit(1)
def count_num_cpu():
''' Returns number of cpu's in system
'''
try:
cpuinfo = open('/proc/cpuinfo', 'r')
global cpu_count
for line in cpuinfo:
if line.startswith('processor'):
cpu_count += 1
cpuinfo.close()
except IOError as e:
print("Could not get cpu count", e)
sys.exit(1)
def count_num_sockets():
''' Returns number of cpu's in system
'''
socket_list = []
global socket_count
try:
for i in range(0, cpu_count):
phy_pkg_file = '/sys/devices/system/cpu/cpu%s' % i
phy_pkg_file += '/topology/physical_package_id'
socket_id = open(phy_pkg_file).read().rstrip()
if socket_id not in socket_list:
socket_list.append(socket_id)
socket_count = socket_count + 1
except Exception as details:
print("INFO: Failed to get number of sockets in system", details)
sys.exit(1)
def is_multi_socket():
'''Return 1 if the system is multi socket else return 0
'''
try:
if socket_count > 1:
return 1
else:
return 0
except Exception:
print("Failed to check if system is multi socket system")
sys.exit(1)
def is_hyper_threaded():
'''Return 1 if the system is hyper threaded else return 0
'''
try:
file_cpuinfo = open("/proc/cpuinfo", 'r')
for line in file_cpuinfo:
if line.startswith('siblings'):
siblings = line.split(":")
if line.startswith('cpu cores'):
cpu_cores = line.split(":")
break
if int( siblings[1] ) / int( cpu_cores[1] )> 1:
file_cpuinfo.close()
return 1
else:
return 0
except Exception:
print("Failed to check if system is hyper-threaded")
sys.exit(1)
def is_multi_core():
''' Return true if system has sockets has multiple cores
'''
try:
file_cpuinfo = open("/proc/cpuinfo", 'r')
for line in file_cpuinfo:
if line.startswith('siblings'):
siblings = line.split(":")
if line.startswith('cpu cores'):
cpu_cores = line.split(":")
break
if int( siblings[1] ) == int( cpu_cores[1] ):
if int( cpu_cores[1] ) > 1:
multi_core = 1
else:
multi_core = 0
else:
num_of_cpus = int(siblings[1]) / int(cpu_cores[1])
if num_of_cpus > 1:
multi_core = 1
else:
multi_core = 0
file_cpuinfo.close()
return multi_core
except Exception:
print("Failed to check if system is multi core system")
sys.exit(1)
def get_hyper_thread_count():
''' Return number of threads in CPU. For eg for x3950 this function
would return 2. In future if 4 threads are supported in CPU, this
routine would return 4
'''
try:
file_cpuinfo = open("/proc/cpuinfo", 'r')
for line in file_cpuinfo:
if line.startswith('siblings'):
siblings = line.split(":")
if line.startswith('cpu cores'):
cpu_cores = line.split(":")
break
        return int(siblings[1]) // int(cpu_cores[1])
except Exception:
print("Failed to check if system is hyper-threaded")
sys.exit(1)
def map_cpuid_pkgid():
''' Routine to map physical package id to cpu id
'''
if is_hyper_threaded():
core_info = {}
try:
for i in range(0, cpu_count):
phy_pkg_file = '/sys/devices/system/cpu/cpu%s' % i
phy_pkg_file += '/topology/physical_package_id'
core_file = '/sys/devices/system/cpu/cpu%s' % i
core_file += '/topology/core_id'
core_id = open(core_file).read().rstrip()
cpu_phy_id = open(phy_pkg_file).read().rstrip()
if not cpu_phy_id in list(cpu_map.keys()):
core_info = {}
else:
core_info = cpu_map[cpu_phy_id]
if not core_id in list(core_info.keys()):
core_info[core_id] = [i]
else:
core_info[core_id].append(i)
cpu_map[cpu_phy_id] = core_info
except Exception as details:
print("Package, core & cpu map table creation failed", e)
sys.exit(1)
else:
for i in range(0, cpu_count):
try:
phy_pkg_file = '/sys/devices/system/cpu/cpu%s' %i
phy_pkg_file += '/topology/physical_package_id'
cpu_phy_id = open(phy_pkg_file).read().rstrip()
if not cpu_phy_id in list(cpu_map.keys()):
cpu_map[cpu_phy_id] = [i]
else:
cpu_map[cpu_phy_id].append(i)
except IOError as e:
print("Mapping of CPU to pkg id failed", e)
sys.exit(1)
def generate_sibling_list():
''' Routine to generate siblings list
'''
try:
for i in range(0, cpu_count):
siblings_file = '/sys/devices/system/cpu/cpu%s' % i
siblings_file += '/topology/thread_siblings_list'
threads_sibs = open(siblings_file).read().rstrip()
thread_ids = threads_sibs.split("-")
if not thread_ids in siblings_list:
siblings_list.append(thread_ids)
except Exception as details:
print("Exception in generate_siblings_list", details)
sys.exit(1)
def get_siblings(cpu_id):
''' Return siblings of cpu_id
'''
try:
cpus = ""
for i in range(0, len(siblings_list)):
for cpu in siblings_list[i]:
if cpu_id == cpu:
for j in siblings_list[i]:
# Exclude cpu_id in the list of siblings
if j != cpu_id:
cpus += j
return cpus
return cpus
except Exception as details:
print("Exception in get_siblings", details)
sys.exit(1)
def get_proc_data(stats_list):
''' Read /proc/stat info and store in dictionary
'''
try:
file_procstat = open("/proc/stat", 'r')
for line in file_procstat:
if line.startswith('cpu'):
data = line.split()
stats_list[data[0]] = data
file_procstat.close()
except OSError as e:
print("Could not read statistics", e)
sys.exit(1)
def get_proc_loc_count(loc_stats):
''' Read /proc/interrupts info and store in list
'''
try:
file_procstat = open("/proc/interrupts", 'r')
for line in file_procstat:
if line.startswith(' LOC:') or line.startswith('LOC:'):
data = line.split()
for i in range(0, cpu_count):
# To skip LOC
loc_stats.append(data[i+1])
file_procstat.close()
return
except Exception as details:
print("Could not read interrupt statistics", details)
sys.exit(1)
def set_sched_mc_power(sched_mc_level):
''' Routine to set sched_mc_power_savings to required level
'''
try:
os.system('echo %s > \
/sys/devices/system/cpu/sched_mc_power_savings 2>/dev/null'
% sched_mc_level)
get_proc_data(stats_start)
except OSError as e:
print("Could not set sched_mc_power_savings to", sched_mc_level, e)
sys.exit(1)
def set_sched_smt_power(sched_smt_level):
''' Routine to set sched_smt_power_savings to required level
'''
try:
os.system('echo %s > \
/sys/devices/system/cpu/sched_smt_power_savings 2>/dev/null'
% sched_smt_level)
get_proc_data(stats_start)
except OSError as e:
print("Could not set sched_smt_power_savings to", sched_smt_level, e)
sys.exit(1)
def set_timer_migration_interface(value):
''' Set value of timer migration interface to a value
passed as argument
'''
try:
os.system('echo %s > \
/proc/sys/kernel/timer_migration 2>/dev/null' % value)
except OSError as e:
print("Could not set timer_migration to ", value, e)
sys.exit(1)
def get_job_count(stress, workload, sched_smt):
''' Returns number of jobs/threads to be triggered
'''
try:
if stress == "thread":
threads = get_hyper_thread_count()
if stress == "partial":
            threads = cpu_count // socket_count
            if is_hyper_threaded():
                if workload == "ebizzy" and int(sched_smt) == 0:
                    threads = threads // get_hyper_thread_count()
                if workload == "kernbench" and int(sched_smt) < 2:
                    threads = threads // get_hyper_thread_count()
if stress == "full":
threads = cpu_count
if stress == "single_job":
threads = 1
duration = 180
return threads
except Exception as details:
print("get job count failed ", details)
sys.exit(1)
def trigger_ebizzy (sched_smt, stress, duration, background, pinned):
''' Triggers ebizzy workload for sched_mc=1
testing
'''
try:
threads = get_job_count(stress, "ebizzy", sched_smt)
workload = "ebizzy"
olddir = os.getcwd()
path = '%s/testcases/bin' % os.environ['LTPROOT']
os.chdir(path)
workload_file = ""
for file_name in os.listdir('.'):
if file_name == workload:
workload_file = file_name
break
if workload_file == "":
print("INFO: ebizzy benchmark not found")
os.chdir(olddir)
sys.exit(1)
get_proc_data(stats_start)
get_proc_loc_count(intr_start)
try:
if background == "yes":
succ = os.system('./ebizzy -t%s -s4096 -S %s >/dev/null &'
% (threads, duration))
else:
if pinned == "yes":
succ = os.system('taskset -c %s ./ebizzy -t%s -s4096 -S %s >/dev/null'
% (cpu_count -1, threads, duration))
else:
succ = os.system('./ebizzy -t%s -s4096 -S %s >/dev/null'
% (threads, duration))
if succ == 0:
print("INFO: ebizzy workload triggerd")
os.chdir(olddir)
            # Commented out because it does not make sense to capture this
            # when the workload is triggered in the background
#get_proc_loc_count(intr_stop)
#get_proc_data(stats_stop)
else:
print("INFO: ebizzy workload triggerd failed")
os.chdir(olddir)
sys.exit(1)
except Exception as details:
print("Ebizzy workload trigger failed ", details)
sys.exit(1)
except Exception as details:
print("Ebizzy workload trigger failed ", details)
sys.exit(1)
def trigger_kernbench (sched_smt, stress, background, pinned, perf_test):
    ''' Trigger a kernbench load on the system.
        Locates a Linux kernel source tree under /root and builds it
        with kernbench (make -j).
'''
olddir = os.getcwd()
try:
threads = get_job_count(stress, "kernbench", sched_smt)
dst_path = "/root"
workload = "kernbench"
olddir = os.getcwd()
path = '%s/testcases/bin' % os.environ['LTPROOT']
os.chdir(path)
workload_file = ""
for file_name in os.listdir('.'):
if file_name == workload:
workload_file = file_name
break
if workload_file != "":
benchmark_path = path
else:
print("INFO: kernbench benchmark not found")
os.chdir(olddir)
sys.exit(1)
os.chdir(dst_path)
linux_source_dir=""
for file_name in os.listdir('.'):
if file_name.find("linux-2.6") != -1 and os.path.isdir(file_name):
linux_source_dir=file_name
break
if linux_source_dir != "":
os.chdir(linux_source_dir)
else:
print("INFO: Linux kernel source not found in /root. Workload\
Kernbench cannot be executed")
sys.exit(1)
get_proc_data(stats_start)
get_proc_loc_count(intr_start)
if pinned == "yes":
os.system ( 'taskset -c %s %s/kernbench -o %s -M -H -n 1 \
>/dev/null 2>&1 &' % (cpu_count-1, benchmark_path, threads))
            # TODO: this local import should be moved to module level
import time
time.sleep(240)
stop_wkld("kernbench")
else:
if background == "yes":
os.system ( '%s/kernbench -o %s -M -H -n 1 >/dev/null 2>&1 &' \
% (benchmark_path, threads))
else:
if perf_test == "yes":
os.system ( '%s/kernbench -o %s -M -H -n 1 >/dev/null 2>&1' \
% (benchmark_path, threads))
else:
os.system ( '%s/kernbench -o %s -M -H -n 1 >/dev/null 2>&1 &' \
% (benchmark_path, threads))
                # TODO: this local import should be moved to module level
import time
time.sleep(240)
stop_wkld("kernbench")
print("INFO: Workload kernbench triggerd")
os.chdir(olddir)
except Exception as details:
print("Workload kernbench trigger failed ", details)
sys.exit(1)
def trigger_workld(sched_smt, workload, stress, duration, background, pinned, perf_test):
''' Triggers workload passed as argument. Number of threads
triggered is based on stress value.
'''
try:
if workload == "ebizzy":
trigger_ebizzy (sched_smt, stress, duration, background, pinned)
if workload == "kernbench":
trigger_kernbench (sched_smt, stress, background, pinned, perf_test)
except Exception as details:
print("INFO: Trigger workload failed", details)
sys.exit(1)
def generate_report():
''' Generate report of CPU utilization
'''
cpu_labels = ('cpu', 'user', 'nice', 'system', 'idle', 'iowait', 'irq',
'softirq', 'x', 'y')
if (not os.path.exists('/procstat')):
os.mkdir('/procstat')
get_proc_data(stats_stop)
reportfile = open('/procstat/cpu-utilisation', 'a')
debugfile = open('/procstat/cpu-utilisation.debug', 'a')
for l in stats_stop:
percentage_list = []
total = 0
for i in range(1, len(stats_stop[l])):
stats_stop[l][i] = int(stats_stop[l][i]) - int(stats_start[l][i])
total += stats_stop[l][i]
percentage_list.append(l)
for i in range(1, len(stats_stop[l])):
percentage_list.append(float(stats_stop[l][i])*100/total)
stats_percentage[l] = percentage_list
for i in range(0, len(cpu_labels)):
print(cpu_labels[i], '\t', end=' ', file=debugfile)
print(file=debugfile)
for l in sorted(stats_stop.keys()):
print(l, '\t', end=' ', file=debugfile)
for i in range(1, len(stats_stop[l])):
print(stats_stop[l][i], '\t', end=' ', file=debugfile)
print(file=debugfile)
for i in range(0, len(cpu_labels)):
print(cpu_labels[i], '\t', end=' ', file=reportfile)
print(file=reportfile)
for l in sorted(stats_percentage.keys()):
print(l, '\t', end=' ', file=reportfile)
for i in range(1, len(stats_percentage[l])):
print(" %3.4f" % stats_percentage[l][i], end=' ', file=reportfile)
print(file=reportfile)
#Now get the package ID information
try:
print("cpu_map: ", cpu_map, file=debugfile)
keyvalfile = open('/procstat/keyval', 'a')
print("nr_packages=%d" % len(cpu_map), file=keyvalfile)
print("system-idle=%3.4f" % (stats_percentage['cpu'][4]), file=keyvalfile)
for pkg in sorted(cpu_map.keys()):
if is_hyper_threaded():
for core in sorted(cpu_map[pkg].keys()):
total_idle = 0
total = 0
for cpu in cpu_map[pkg][core]:
total_idle += stats_stop["cpu%d" % cpu][4]
for i in range(1, len(stats_stop["cpu%d" % cpu])):
total += stats_stop["cpu%d" % cpu][i]
else:
total_idle = 0
total = 0
for cpu in cpu_map[pkg]:
total_idle += stats_stop["cpu%d" % cpu][4]
for i in range(1, len(stats_stop["cpu%d" % cpu])):
total += stats_stop["cpu%d" % cpu][i]
print("Package: ", pkg, "Idle %3.4f%%" \
% (float(total_idle)*100/total), file=reportfile)
print("package-%s=%3.4f" % \
(pkg, (float(total_idle)*100/total)), file=keyvalfile)
except Exception as details:
print("Generating utilization report failed: ", details)
sys.exit(1)
#Add record delimiter '\n' before closing these files
print(file=debugfile)
debugfile.close()
print(file=reportfile)
reportfile.close()
print(file=keyvalfile)
keyvalfile.close()
def generate_loc_intr_report():
''' Generate interrupt report of CPU's
'''
try:
if (not os.path.exists('/procstat')):
os.mkdir('/procstat')
get_proc_loc_count(intr_stop)
reportfile = open('/procstat/cpu-loc_interrupts', 'a')
print("==============================================", file=reportfile)
print(" Local timer interrupt stats ", file=reportfile)
print("==============================================", file=reportfile)
for i in range(0, cpu_count):
intr_stop[i] = int(intr_stop[i]) - int(intr_start[i])
print("CPU%s: %s" %(i, intr_stop[i]), file=reportfile)
print(file=reportfile)
reportfile.close()
except Exception as details:
print("Generating interrupt report failed: ", details)
sys.exit(1)
def record_loc_intr_count():
''' Record Interrupt statistics when timer_migration
was disabled
'''
try:
global intr_start, intr_stop
for i in range(0, cpu_count):
intr_stat_timer_0.append(intr_stop[i])
intr_start = []
intr_stop = []
except Exception as details:
print("INFO: Record interrupt statistics when timer_migration=0",details)
def expand_range(range_val):
'''
Expand the range of value into actual numbers
'''
ids_list = list()
try:
sep_comma = range_val.split(",")
for i in range(0, len(sep_comma)):
hyphen_values = sep_comma[i].split("-")
if len(hyphen_values) == 1:
ids_list.append(int(hyphen_values[0]))
else:
for j in range(int(hyphen_values[0]), int(hyphen_values[1])+1):
ids_list.append(j)
return(ids_list)
except Exception as details:
print("INFO: expand_pkg_grps failed ", details)
def is_quad_core():
'''
Read /proc/cpuinfo and check if system is Quad core
'''
try:
cpuinfo = open('/proc/cpuinfo', 'r')
for line in cpuinfo:
if line.startswith('cpu cores'):
cores = line.split("cpu cores")
num_cores = cores[1].split(":")
cpuinfo.close()
if int(num_cores[1]) == 4:
return(1)
else:
return(0)
except IOError as e:
print("Failed to get cpu core information", e)
sys.exit(1)
def validate_cpugrp_map(cpu_group, sched_mc_level, sched_smt_level):
'''
Verify if cpugrp belong to same package
'''
modi_cpu_grp = cpu_group[:]
try:
if is_hyper_threaded():
for pkg in sorted(cpu_map.keys()):
# if CPU utilized is across package this condition will be true
if len(modi_cpu_grp) != len(cpu_group):
break
for core in sorted(cpu_map[pkg].keys()):
core_cpus = cpu_map[pkg][core]
if core_cpus == modi_cpu_grp:
return 0
else:
#if CPUs used across the cores
for i in range(0, len(core_cpus)):
if core_cpus[i] in modi_cpu_grp:
modi_cpu_grp.remove(core_cpus[i])
if len(modi_cpu_grp) == 0:
return 0
#This code has to be deleted
#else:
# If sched_smt == 0 then its oky if threads run
# in different cores of same package
#if sched_smt_level > 0 :
#return 1
else:
for pkg in sorted(cpu_map.keys()):
pkg_cpus = cpu_map[pkg]
if len(cpu_group) == len(pkg_cpus):
if pkg_cpus == cpu_group:
return(0)
else:
                    if int(cpu_group[0]) in cpu_map[pkg] or int(cpu_group[1]) in cpu_map[pkg]:
return(0)
return(1)
except Exception as details:
print("Exception in validate_cpugrp_map: ", details)
sys.exit(1)
def verify_sched_domain_dmesg(sched_mc_level, sched_smt_level):
'''
Read sched domain information from dmesg.
'''
cpu_group = list()
try:
dmesg_info = os.popen('dmesg').read()
if dmesg_info != "":
lines = dmesg_info.split('\n')
for i in range(0, len(lines)):
if lines[i].endswith('CPU'):
groups = lines[i+1].split("groups:")
group_info = groups[1]
if group_info.find("(") != -1:
openindex=group_info.index("(")
closeindex=group_info.index(")")
group_info=group_info.replace\
(group_info[openindex:closeindex+1],"")
subgroup = group_info.split(",")
for j in range(0, len(subgroup)):
cpu_group = expand_range(subgroup[j])
status = validate_cpugrp_map(cpu_group, sched_mc_level,\
sched_smt_level)
if status == 1:
if is_quad_core() == 1:
if int(sched_mc_level) == 0:
return(0)
else:
return(1)
else:
return(1)
return(0)
else:
return(1)
except Exception as details:
print("Reading dmesg failed", details)
sys.exit(1)
def get_cpu_utilization(cpu):
''' Return cpu utilization of cpu_id
'''
try:
for l in sorted(stats_percentage.keys()):
if cpu == stats_percentage[l][0]:
return stats_percentage[l][1]
return -1
except Exception as details:
print("Exception in get_cpu_utilization", details)
sys.exit(1)
def validate_cpu_consolidation(stress, work_ld, sched_mc_level, sched_smt_level):
''' Verify if cpu's on which threads executed belong to same
package
'''
cpus_utilized = list()
threads = get_job_count(stress, work_ld, sched_smt_level)
try:
for l in sorted(stats_percentage.keys()):
            # NOTE: the utilization thresholds below are empirical
cpu_id = stats_percentage[l][0].split("cpu")
if cpu_id[1] == '':
continue
if int(cpu_id[1]) in cpus_utilized:
continue
            if is_hyper_threaded():
                siblings = ""
                utilization = stats_percentage[l][1]
                if work_ld == "kernbench" and sched_smt_level < sched_mc_level:
                    siblings = get_siblings(cpu_id[1])
                    if siblings != "":
                        sib_list = siblings.split()
                        utilization = int(stats_percentage[l][1])
                        for i in range(0, len(sib_list)):
                            utilization += int(get_cpu_utilization("cpu%s" % sib_list[i]))
                if utilization > 40:
                    cpus_utilized.append(int(cpu_id[1]))
                    if siblings != "":
                        for i in range(0, len(sib_list)):
                            cpus_utilized.append(int(sib_list[i]))
else:
                # This threshold would need tuning based on observed results
if stats_percentage[l][1] > 40:
cpus_utilized.append(int(cpu_id[1]))
else:
if work_ld == "kernbench" :
if stats_percentage[l][1] > 50:
cpus_utilized.append(int(cpu_id[1]))
else:
if stats_percentage[l][1] > 70:
cpus_utilized.append(int(cpu_id[1]))
cpus_utilized.sort()
print("INFO: CPU's utilized ", cpus_utilized)
        # If fewer CPUs were utilized than there are jobs, fail with 1
if len(cpus_utilized) < threads:
return 1
status = validate_cpugrp_map(cpus_utilized, sched_mc_level, \
sched_smt_level)
if status == 1:
print("INFO: CPUs utilized is not in same package or core")
return(status)
except Exception as details:
print("Exception in validate_cpu_consolidation: ", details)
sys.exit(1)
def get_cpuid_max_intr_count():
    '''Return the CPU ids of the two CPUs with the highest interrupt counts'''
try:
        highest = 0
        second_highest = 0
        cpu1_max_intr = -1
        cpu2_max_intr = -1
        cpus_utilized = []
        # Skip CPU0 as its interrupt count is generally high
        for i in range(1, cpu_count):
            if int(intr_stop[i]) > int(highest):
                if highest != 0:
                    second_highest = highest
                    cpu2_max_intr = cpu1_max_intr
                highest = int(intr_stop[i])
                cpu1_max_intr = i
            else:
                if int(intr_stop[i]) > int(second_highest):
                    second_highest = int(intr_stop[i])
                    cpu2_max_intr = i
        if cpu1_max_intr < 0 or cpu2_max_intr < 0:
            # Fewer than two candidate CPUs were found
            return cpus_utilized
        cpus_utilized.append(cpu1_max_intr)
        cpus_utilized.append(cpu2_max_intr)
for i in range(1, cpu_count):
if i != cpu1_max_intr and i != cpu2_max_intr:
diff = second_highest - intr_stop[i]
                # The difference threshold below is empirical and may need tuning
if diff < 10000:
print("INFO: Diff in interrupt count is below threshold")
cpus_utilized = []
return cpus_utilized
print("INFO: Interrupt count in other CPU's low as expected")
return cpus_utilized
except Exception as details:
print("Exception in get_cpuid_max_intr_count: ", details)
sys.exit(1)
def validate_ilb (sched_mc_level, sched_smt_level):
    ''' Validate that the idle load balancer (ilb) runs in the same package as the workload
'''
try:
cpus_utilized = get_cpuid_max_intr_count()
if not cpus_utilized:
return 1
status = validate_cpugrp_map(cpus_utilized, sched_mc_level, sched_smt_level)
return status
except Exception as details:
print("Exception in validate_ilb: ", details)
sys.exit(1)
def reset_schedmc():
    ''' Reset sched_mc_power_savings to zero
    '''
    try:
        os.system('echo 0 > /sys/devices/system/cpu/sched_mc_power_savings 2>/dev/null')
except OSError as e:
print("Could not set sched_mc_power_savings to 0", e)
sys.exit(1)
def reset_schedsmt():
    ''' Reset sched_smt_power_savings to zero
    '''
    try:
        os.system('echo 0 > /sys/devices/system/cpu/sched_smt_power_savings 2>/dev/null')
except OSError as e:
print("Could not set sched_smt_power_savings to 0", e)
sys.exit(1)
def stop_wkld(work_ld):
''' Kill workload triggered in background
'''
try:
os.system('pkill %s 2>/dev/null' %work_ld)
if work_ld == "kernbench":
os.system('pkill make 2>/dev/null')
except OSError as e:
print("Exception in stop_wkld", e)
sys.exit(1)
| 35.639952 | 102 | 0.535123 |
748e1e1b17a2070f17bb8fc5cff4ce2b007e61bc
| 525 |
py
|
Python
|
books/PythonAutomate/google_spreadsheets/create_upload_spreadsheets.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/google_spreadsheets/create_upload_spreadsheets.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/google_spreadsheets/create_upload_spreadsheets.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""create_upload_spreadsheets.py
Open, create, and upload Google Sheets documents
"""
import ezsheets
# Open a registered (existing) spreadsheet
sheet_id = "1jDZEdvSIh4TmZxccyy0ZXrH-ELlrwq8_YYiZrEOB4jg"
ss = ezsheets.Spreadsheet(sheet_id)
print(ss) # <Spreadsheet title="Bean Count", 1 sheets>
print(ss.title) # Bean Count
# Create an empty spreadsheet
ss = ezsheets.createSpreadsheet("Title of My New Spreadsheet")
print(ss.title) # Title of My New Spreadsheet
ss.delete()  # Delete the spreadsheet
# Upload an existing Excel file
ss = ezsheets.upload("example.xlsx")
print(ss.title)
print(ss.url)
| 23.863636 | 62 | 0.737143 |
7ac894034e1f619251fe3ec435d7496765057222
| 4,158 |
py
|
Python
|
official/audio/melgan/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/audio/melgan/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/audio/melgan/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MelGAN eval"""
import os
import numpy as np
from scipy.io.wavfile import write
from mindspore import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common.tensor import Tensor
import mindspore.context as context
from src.model import Generator
from src.model_utils.config import config as cfg
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
if __name__ == '__main__':
context.set_context(device_id=cfg.device_id)
if not os.path.exists(cfg.output_path):
os.mkdir(cfg.output_path)
net_G = Generator(alpha=cfg.leaky_alpha)
net_G.set_train(False)
# load checkpoint
param_dict = load_checkpoint(cfg.eval_model_path)
load_param_into_net(net_G, param_dict)
print('load model done !')
model = Model(net_G)
# get list
mel_path = cfg.eval_data_path
data_list = os.listdir(mel_path)
for data_name in data_list:
melpath = os.path.join(mel_path, data_name)
# data preprocessing
meldata = np.load(melpath)
meldata = (meldata + 5.0) / 5.0
pad_node = 0
if meldata.shape[1] < cfg.eval_length:
pad_node = cfg.eval_length - meldata.shape[1]
meldata = np.pad(meldata, ((0, 0), (0, pad_node)), mode='constant', constant_values=0.0)
meldata_s = meldata[np.newaxis, :, 0:cfg.eval_length]
# first frame
wav_data = np.array([])
output = model.predict(Tensor(meldata_s)).asnumpy().ravel()
wav_data = np.concatenate((wav_data, output))
# initialization parameters
repeat_frame = cfg.eval_length // 8
i = cfg.eval_length - repeat_frame
length = cfg.eval_length
num_weights = i
interval = (cfg.hop_size*repeat_frame) // num_weights
weights = np.linspace(0.0, 1.0, num_weights)
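        # The linspace weights implement a linear cross-fade: in the overlap
        # region each new frame is blended into the tail of the previous one.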
while i < meldata.shape[1]:
# data preprocessing
meldata_s = meldata[:, i:i+length]
if meldata_s.shape[1] != cfg.eval_length:
pad_node = cfg.hop_size * (cfg.eval_length-meldata_s.shape[1])
meldata_s = np.pad(meldata_s, ((0, 0), (0, cfg.eval_length-meldata_s.shape[1])), mode='edge')
meldata_s = meldata_s[np.newaxis, :, :]
# i-th frame
output = model.predict(Tensor(meldata_s)).asnumpy().ravel()
print('output{}={}'.format(i, output))
lenwav = cfg.hop_size*repeat_frame
lenout = 0
# overlap
for j in range(num_weights-1):
wav_data[-lenwav:-lenwav+interval] = weights[-j-1] * wav_data[-lenwav:-lenwav+interval] +\
weights[j] * output[lenout:lenout+interval]
lenwav = lenwav - interval
lenout = lenout + interval
wav_data[-lenwav:] = weights[-num_weights] * wav_data[-lenwav:] +\
weights[num_weights-1] * output[lenout:lenout+lenwav]
wav_data = np.concatenate((wav_data, output[cfg.hop_size*repeat_frame:]))
i = i + length - repeat_frame
if pad_node != 0:
wav_data = wav_data[:-pad_node]
# save as wav file
wav_data = 32768.0 * wav_data
out_path = os.path.join(cfg.output_path, 'restruction_' + data_name.replace('npy', 'wav'))
write(out_path, cfg.sample, wav_data.astype('int16'))
print('{} done!'.format(data_name))
| 37.8 | 109 | 0.62482 |
bb32bbe04e04b5f3fd070e1b2b1575b440eafdc4
| 490 |
py
|
Python
|
codeit/algorithm/fib_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/fib_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/fib_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""피보나치 수열 Memoization"""
def fib_memo(n, cache):
if n < 3:
return 1
if n in cache:
return cache[n]
cache[n] = fib_memo(n - 2, cache) + fib_memo(n - 1, cache)
return cache[n]
def fib(n):
    # Cache mapping n to the n-th Fibonacci number
fib_cache = {}
return fib_memo(n, fib_cache)
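# An equivalent sketch using the standard library: functools.lru_cache
# maintains the cache dict from fib_memo automatically (fib_lru is a
# hypothetical name, not part of the original exercise).
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_lru(n):
    return 1 if n < 3 else fib_lru(n - 2) + fib_lru(n - 1)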
if __name__ == '__main__':
assert fib(10) == 55, 'fib(10) error'
assert fib(50) == 12586269025, 'fib(50) error'
assert fib(100) == 354224848179261915075, 'fib(100) error'
| 20.416667 | 62 | 0.581633 |
701e63a0eb1392bb35e5e801b97cc6aae8372040
| 2,121 |
py
|
Python
|
aero_info.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
aero_info.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
aero_info.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
def table_atm(h, parametr):
"""
Cтандартная атмосфера для высот h = -2000 м ... 80000 м (ГОСТ 4401-81)
arguments: h высота [м], parametr:
1 - температура [К];
2 - давление [Па];
3 - плотность [кг/м^3];
4 - местная скорость звука [м/с];
5 - динамическая вязкость [Па*с]
6 - кинематическая вязкость [м^2*с];
return: значение выбранного параметра на высоте h
"""
table = pd.read_csv('data_constants/table_atm.csv', names=['h', 'p', 'rho', 'T'], sep=',')
table_h = table['h']
table_p = table['p']
table_T = table['T']
table_rho = table['rho']
if parametr == 1:
return np.interp(h, table_h, table_T)
elif parametr == 2:
return np.interp(h, table_h, table_p)
elif parametr == 3:
return np.interp(h, table_h, table_rho)
elif parametr == 4:
p_h = np.interp(h, table_h, table_p)
rho_h = np.interp(h, table_h, table_rho)
k_x = 1.4
a_h = np.sqrt(k_x * p_h / rho_h)
return a_h
elif parametr == 5:
T_h = np.interp(h, table_h, table_T)
rho_h = np.interp(h, table_h, table_rho)
betta_s = 1.458*1e-6
S = 110.4
myu = betta_s * T_h**(3/2) / (T_h + S)
return myu
elif parametr == 6:
T_h = np.interp(h, table_h, table_T)
rho_h = np.interp(h, table_h, table_rho)
betta_s = 1.458*1e-6
S = 110.4
myu = betta_s * T_h**(3/2) / (T_h + S)
nyu = myu / rho_h
return nyu
else:
print("Ошибка: неверное значение при выборе параметра")
def Cx43(Mah):
"""
    Function of the 1943 drag law
    arguments: Mach number
    return: drag coefficient Cx
"""
table = pd.read_csv('data_constants/table_cx43.csv', names=['mah', 'cx'], sep=',')
table_mah = table['mah']
table_cx = table['cx']
return np.interp(Mah, table_mah, table_cx)
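# A minimal usage sketch (assumes the CSV files under data_constants/ exist;
# the altitude and Mach values below are illustrative):
if __name__ == '__main__':
    h = 10000.0                     # altitude [m]
    print(table_atm(h, 2))          # pressure [Pa]
    print(table_atm(h, 4))          # local speed of sound [m/s]
    print(Cx43(0.9))                # drag coefficient at Mach 0.9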
| 31.656716 | 94 | 0.530882 |
70783165cd3fb9edb1ec92fcc5a029ba5024797c
| 590 |
py
|
Python
|
apps/quiver/forms.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2017-12-17T21:28:22.000Z
|
2018-02-02T14:44:58.000Z
|
apps/quiver/forms.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/quiver/forms.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
from django import forms
from django.utils.translation import gettext_lazy as _
from .models import AnalyticsService
class AnalyticsServiceForm(forms.ModelForm):
field_order = (
'name',
'description',
'url',
'api_key',
'visibility',
)
def __init__(self, *args, **kwargs):
super(AnalyticsServiceForm, self).__init__(*args, **kwargs)
class Meta:
model = AnalyticsService
fields = (
'name',
'description',
'url',
'api_key',
'visibility',
)
| 21.071429 | 67 | 0.555932 |
3b1680f8c48497f62608c5fbf5364bcabd9c3bf9
| 1,288 |
py
|
Python
|
leetcode/010-Regular-Expression-Matching/RegExpMatch.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/010-Regular-Expression-Matching/RegExpMatch.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/010-Regular-Expression-Matching/RegExpMatch.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
n_s = len(s)
n_star = 0
        # Split the pattern into segments: single chars or "char*" pairs
        seg = []
i = len(p) - 1
while i > -1:
if p[i] == '*':
n_star += 1
seg.insert(0, p[i - 1:i + 1])
i -= 2
else:
seg.insert(0, p[i])
i -= 1
n_p = len(p) - n_star
m, n = n_p + 1, n_s + 1
        # dp[i][j]: True if the first i segments match the first j chars of s
        dp = [[False for j in range(n)] for i in range(m)]
dp[0][0] = True
        for i in range(1, m):
if len(seg[i - 1]) == 2:
dp[i][0] = True
else:
break
        for i in range(1, m):
            for j in range(1, n):
if dp[i][j - 1] and len(seg[i - 1]) == 2 and (seg[i - 1][0] == '.' or seg[i - 1][0] == s[j - 1]):
dp[i][j] = True
continue
if dp[i - 1][j] and len(seg[i - 1]) == 2:
dp[i][j] = True
continue
if dp[i - 1][j - 1] and (seg[i - 1][0] == '.' or seg[i - 1][0] == s[j - 1]):
dp[i][j] = True
continue
return dp[-1][-1]
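# A quick sanity sketch using the classic problem examples (expected results
# shown as comments):
if __name__ == '__main__':
    print(Solution().isMatch("aab", "c*a*b"))               # True
    print(Solution().isMatch("mississippi", "mis*is*p*."))  # False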
| 28 | 113 | 0.321429 |
3b5a0df898048828f4c520e9b8d8fa29121269d4
| 2,459 |
py
|
Python
|
warf/urls.py
|
acaciawater/warf
|
2c2f5f38f6f681549f34e335e88fb6e7e4b7d229
|
[
"MIT"
] | null | null | null |
warf/urls.py
|
acaciawater/warf
|
2c2f5f38f6f681549f34e335e88fb6e7e4b7d229
|
[
"MIT"
] | null | null | null |
warf/urls.py
|
acaciawater/warf
|
2c2f5f38f6f681549f34e335e88fb6e7e4b7d229
|
[
"MIT"
] | null | null | null |
"""warf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from warf.views import HomeView
from acacia.data.views import DashGroupView
admin.autodiscover()
urlpatterns = patterns('warf.views',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^data/', include('acacia.data.urls',namespace='acacia')),
url(r'^(?P<name>[\w\s]+)$', DashGroupView.as_view(), name='dashboard-view'),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.IMG_URL, document_root=settings.IMG_ROOT)
from django.contrib.auth import views as auth_views
urlpatterns += patterns('',
url(r'^password/change/$',
auth_views.password_change,
name='password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='password_reset'),
url(r'^accounts/password/reset/done/$',
auth_views.password_reset_done,
name='password_reset_done'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='password_reset_complete'),
url(r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='password_reset_confirm'),
url(r'^accounts/', include('registration.backends.default.urls'))
)
| 42.396552 | 80 | 0.660838 |
8ed1051b90c83fd42c2125034f9fa9858f735a06
| 2,252 |
py
|
Python
|
pyventskalender/tag11.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
pyventskalender/tag11.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
pyventskalender/tag11.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
# Today's topic is the dictionary, `dict`, a container that lets you store
# and look up data by "keywords".
#
# We stay with the theme of reading the file with weights.
# We have understood how to read it and extract its contents.
# But to answer, say, the question of what Caro's maximum weight was during
# the observed period, you currently have to write quite a bit of code.
#
# It would be nicer if we could get a list of weights for each person.
# That is where the dictionary helps us.
# %%
# A dictionary is written with `{}` braces.
# It maps a key (left of the `:`) to a value (right of the `:`).
# Each key has exactly one value.
# A nice analogy is a phone book;
# a number is stored for every name:
adressbuch = {
"Feuerwehr": "110",
"Polizei": "112",
"Herrmann": "07543 73135",
"Caro": "07314 93849193",
}
# To get someone's number, you can access it
# via `[]`:
adressbuch["Polizei"]
# %%
# Likewise, a value can be changed
adressbuch["Caro"] = "07314 4815"
# or a new one added:
adressbuch["Paul"] = "07321 37294"
# %%
# To check whether an entry exists for a given key,
# use `in`:
"Paul" in adressbuch
# %% Extend the address book -- Test 10
# The values in a dictionary can themselves be more complex objects,
# such as a list.
# Add a list with the numbers "0176 84927413" and "07421 39495"
# for "Wolfgang".
# %% Extract and store in a dict -- Test 20
# Enough preamble, now to the actual task.
# Complete the following function so that it reads a file with the same
# format as `beispieldaten_gewichte.txt` and returns a dict that has the
# names of the weighed persons as keys and, as values, a list of their
# weights as numbers (`int`).
#
# For example,
#     extrahiere_gewichte("beispieldaten_gewichte.txt")
# produces the output
#     {'Herrmann': [72, 73, 71, 74],
#      'Caro': [62, 60, 59, 60, 61, 60]}
#
# Hint: ideally reuse the code from yesterday!
from typing import Dict, List
import re
def extrahiere_gewichte(dateiname: str) -> Dict[str, List[int]]:
pass
| 32.171429 | 79 | 0.723801 |
15e077649c4d54f45c9db25294bbe2de314c2d0f
| 1,523 |
py
|
Python
|
python/asyncio/example_3.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/asyncio/example_3.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/asyncio/example_3.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""
example_3.py
Just a short example demonstrating a simple state machine in Python
However, this one has delays that affect it
"""
import time
import queue
def check_time(func):
def wrapper(*args, **kwargs):
start_time = time.time()
func(*args, **kwargs)
print(f'Task {args[0]} total elapsed time: {time.time() - start_time:.1f}')
return wrapper
def task(name, queue):
while not queue.empty():
count = queue.get()
total = 0
start_time = time.time()
for x in range(count):
print(f'Task {name} running')
time.sleep(1)
total += 1
yield
print(f'Task {name} total: {total}')
print(f'Task {name} total elapsed time: {time.time() - start_time:.1f}')
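# Note: each `yield` inside task() hands control back to the scheduler loop
# in main(), which is what makes the two tasks interleave cooperatively.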
def main():
"""
This is the main entry point for the program
"""
# create the queue of 'work'
work_queue = queue.Queue()
# put some 'work' in the queue
for work in [15, 10, 5, 2]:
work_queue.put(work)
tasks = [
task('One', work_queue),
task('Two', work_queue)
]
# run the scheduler to run the tasks
start_time = time.time()
done = False
while not done:
for t in tasks:
try:
next(t)
except StopIteration:
tasks.remove(t)
if len(tasks) == 0:
done = True
print()
print(f'Task elapsed time: {time.time() - start_time:.1f}')
if __name__ == '__main__':
main()
| 23.075758 | 83 | 0.550886 |
c61c855194ada76d3956f7dc1656562be0558a60
| 1,501 |
py
|
Python
|
chapter100/postgres_03.py
|
thiagola92/learning-databases-with-python
|
cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e
|
[
"MIT"
] | null | null | null |
chapter100/postgres_03.py
|
thiagola92/learning-databases-with-python
|
cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e
|
[
"MIT"
] | null | null | null |
chapter100/postgres_03.py
|
thiagola92/learning-databases-with-python
|
cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e
|
[
"MIT"
] | null | null | null |
import asyncio
import psycopg
from datetime import datetime
start = datetime.now()
async def main():
client = await psycopg.AsyncConnection.connect(
"postgres://username:[email protected]"
)
cursor = await client.cursor()
await cursor.execute(
"""
CREATE TABLE table_name(
name text,
description text
)
"""
)
package = []
insert_sql = """
INSERT INTO table_name
VALUES(%s, %s)
"""
with open("utils/trash.csv") as file:
for line in file.readlines():
name, description = line.split(",")
package.append((name, description))
if len(package) >= 10000:
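                # Flush the batch in four slices via asyncio.gather; the four
                # slices still share one connection, so they are effectively
                # serialized by the driver (a batching pattern rather than
                # true parallel I/O).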
await asyncio.gather(
cursor.executemany(insert_sql, package[:2500]),
cursor.executemany(insert_sql, package[2500:5000]),
cursor.executemany(insert_sql, package[5000:7500]),
cursor.executemany(insert_sql, package[7500:]),
)
await client.commit()
package.clear()
if package:
await cursor.executemany(insert_sql, package)
await client.commit()
await cursor.execute("SELECT COUNT(*) FROM table_name")
print(await cursor.fetchone())
await cursor.execute("DROP TABLE table_name")
await cursor.close()
await client.commit()
await client.close()
asyncio.run(main())
print(datetime.now() - start)
| 23.825397 | 71 | 0.571619 |
c62ddfcde20e40bca5ab0c76df0f397bf947ced5
| 24,032 |
py
|
Python
|
Packs/Ansible_Powered_Integrations/Integrations/VMwareV2/VMwareV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Ansible_Powered_Integrations/Integrations/VMwareV2/VMwareV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Ansible_Powered_Integrations/Integrations/VMwareV2/VMwareV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import traceback
from typing import Dict, cast
import ansible_runner
import demistomock as demisto # noqa: F401
import ssh_agent_setup
from CommonServerPython import * # noqa: F401
# Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/
def dict2md(json_block, depth=0):
markdown = ""
if isinstance(json_block, dict):
markdown = parseDict(json_block, depth)
if isinstance(json_block, list):
markdown = parseList(json_block, depth)
return markdown
def parseDict(d, depth):
markdown = ""
for k in d:
if isinstance(d[k], (dict, list)):
markdown += addHeader(k, depth)
markdown += dict2md(d[k], depth + 1)
else:
markdown += buildValueChain(k, d[k], depth)
return markdown
def parseList(rawlist, depth):
markdown = ""
for value in rawlist:
if not isinstance(value, (dict, list)):
index = rawlist.index(value)
markdown += buildValueChain(index, value, depth)
else:
markdown += parseDict(value, depth)
return markdown
def buildHeaderChain(depth):
list_tag = '* '
htag = '#'
chain = list_tag * (bool(depth)) + htag * (depth + 1) + \
' value ' + (htag * (depth + 1) + '\n')
return chain
def buildValueChain(key, value, depth):
tab = " "
list_tag = '* '
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + str(value) + "\n"
return chain
def addHeader(value, depth):
chain = buildHeaderChain(depth)
chain = chain.replace('value', value.title())
return chain
# Remove ansible branding from results
def rec_ansible_key_strip(obj):
if isinstance(obj, dict):
return {key.replace('ansible_', ''): rec_ansible_key_strip(val) for key, val in obj.items()}
return obj
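# A small illustration of the Markdown converter above (the input dict is
# made up; the output is shown in the trailing comment):
#
#     dict2md({"host": {"name": "esx1", "cpus": 8}})
#     # returns: "# Host #\n* name: esx1\n* cpus: 8\n"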
# COMMAND FUNCTIONS
def generic_ansible(integration_name, command, args: Dict[str, Any]) -> CommandResults:
readable_output = ""
sshkey = ""
fork_count = 1 # default to executing against 1 host at a time
if args.get('concurrency'):
fork_count = cast(int, args.get('concurrency'))
inventory: Dict[str, dict] = {}
inventory['all'] = {}
inventory['all']['hosts'] = {}
inventory['all']['hosts']['localhost'] = {}
inventory['all']['hosts']['localhost']['ansible_connection'] = 'local'
module_args = ""
# build module args list
for arg_key, arg_value in args.items():
# skip hardcoded host arg, as it doesn't related to module
if arg_key == 'host':
continue
module_args += "%s=\"%s\" " % (arg_key, arg_value)
    # If this isn't host based, then all the integration params will be used as command args
for arg_key, arg_value in demisto.params().items():
module_args += "%s=\"%s\" " % (arg_key, arg_value)
r = ansible_runner.run(inventory=inventory, host_pattern='all', module=command, quiet=True,
omit_event_data=True, ssh_key=sshkey, module_args=module_args, forks=fork_count)
results = []
for each_host_event in r.events:
# Troubleshooting
# demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event))
if each_host_event['event'] in ["runner_on_ok", "runner_on_unreachable", "runner_on_failed"]:
# parse results
result = json.loads('{' + each_host_event['stdout'].split('{', 1)[1])
host = each_host_event['stdout'].split('|', 1)[0].strip()
status = each_host_event['stdout'].replace('=>', '|').split('|', 3)[1]
# if successful build outputs
if each_host_event['event'] == "runner_on_ok":
if 'fact' in command:
result = result['ansible_facts']
else:
if result.get(command) is not None:
result = result[command]
else:
result.pop("ansible_facts", None)
result = rec_ansible_key_strip(result)
if host != "localhost":
readable_output += "# %s - %s\n" % (host, status)
else:
                    # This integration is not host based
readable_output += "# %s\n" % status
readable_output += dict2md(result)
# add host and status to result
result['host'] = host
result['status'] = status
results.append(result)
if each_host_event['event'] == "runner_on_unreachable":
msg = "Host %s unreachable\nError Details: %s" % (host, result)
return_error(msg)
if each_host_event['event'] == "runner_on_failed":
msg = "Host %s failed running command\nError Details: %s" % (host, result)
return_error(msg)
    # This integration is not host based and always runs against localhost
results = results[0]
return CommandResults(
readable_output=readable_output,
outputs_prefix=integration_name + '.' + command,
outputs_key_field='',
outputs=results
)
# MAIN FUNCTION
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# SSH Key integration requires ssh_agent to be running in the background
ssh_agent_setup.setup()
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('ok')
elif demisto.command() == 'vmware-about-info':
return_results(generic_ansible('vmwarev2', 'vmware_about_info', demisto.args()))
elif demisto.command() == 'vmware-category':
return_results(generic_ansible('vmwarev2', 'vmware_category', demisto.args()))
elif demisto.command() == 'vmware-category-info':
return_results(generic_ansible('vmwarev2', 'vmware_category_info', demisto.args()))
elif demisto.command() == 'vmware-cfg-backup':
return_results(generic_ansible('vmwarev2', 'vmware_cfg_backup', demisto.args()))
elif demisto.command() == 'vmware-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_cluster', demisto.args()))
elif demisto.command() == 'vmware-cluster-drs':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_drs', demisto.args()))
elif demisto.command() == 'vmware-cluster-ha':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_ha', demisto.args()))
elif demisto.command() == 'vmware-cluster-info':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_info', demisto.args()))
elif demisto.command() == 'vmware-cluster-vsan':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_vsan', demisto.args()))
elif demisto.command() == 'vmware-content-deploy-template':
return_results(generic_ansible('vmwarev2', 'vmware_content_deploy_template', demisto.args()))
elif demisto.command() == 'vmware-content-library-info':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_info', demisto.args()))
elif demisto.command() == 'vmware-content-library-manager':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_manager', demisto.args()))
elif demisto.command() == 'vmware-datacenter':
return_results(generic_ansible('vmwarev2', 'vmware_datacenter', demisto.args()))
elif demisto.command() == 'vmware-datastore-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_cluster', demisto.args()))
elif demisto.command() == 'vmware-datastore-info':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_info', demisto.args()))
elif demisto.command() == 'vmware-datastore-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-dns-config':
return_results(generic_ansible('vmwarev2', 'vmware_dns_config', demisto.args()))
elif demisto.command() == 'vmware-drs-group':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group', demisto.args()))
elif demisto.command() == 'vmware-drs-group-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group_info', demisto.args()))
elif demisto.command() == 'vmware-drs-rule-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_rule_info', demisto.args()))
elif demisto.command() == 'vmware-dvs-host':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_host', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-find':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_find', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-dvswitch':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-lacp':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_lacp', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-nioc':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_nioc', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-pvlans':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_pvlans', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-uplink-pg':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_uplink_pg', demisto.args()))
elif demisto.command() == 'vmware-evc-mode':
return_results(generic_ansible('vmwarev2', 'vmware_evc_mode', demisto.args()))
elif demisto.command() == 'vmware-folder-info':
return_results(generic_ansible('vmwarev2', 'vmware_folder_info', demisto.args()))
elif demisto.command() == 'vmware-guest':
return_results(generic_ansible('vmwarev2', 'vmware_guest', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-manager':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_manager', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attribute-defs':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attribute_defs', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attributes':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attributes', demisto.args()))
elif demisto.command() == 'vmware-guest-customization-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_customization_info', demisto.args()))
elif demisto.command() == 'vmware-guest-disk':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk', demisto.args()))
elif demisto.command() == 'vmware-guest-disk-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk_info', demisto.args()))
elif demisto.command() == 'vmware-guest-find':
return_results(generic_ansible('vmwarev2', 'vmware_guest_find', demisto.args()))
elif demisto.command() == 'vmware-guest-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_info', demisto.args()))
elif demisto.command() == 'vmware-guest-move':
return_results(generic_ansible('vmwarev2', 'vmware_guest_move', demisto.args()))
elif demisto.command() == 'vmware-guest-network':
return_results(generic_ansible('vmwarev2', 'vmware_guest_network', demisto.args()))
elif demisto.command() == 'vmware-guest-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_guest_powerstate', demisto.args()))
elif demisto.command() == 'vmware-guest-screenshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_screenshot', demisto.args()))
elif demisto.command() == 'vmware-guest-sendkey':
return_results(generic_ansible('vmwarev2', 'vmware_guest_sendkey', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-upgrade':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_upgrade', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-wait':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_wait', demisto.args()))
elif demisto.command() == 'vmware-guest-video':
return_results(generic_ansible('vmwarev2', 'vmware_guest_video', demisto.args()))
elif demisto.command() == 'vmware-guest-vnc':
return_results(generic_ansible('vmwarev2', 'vmware_guest_vnc', demisto.args()))
elif demisto.command() == 'vmware-host':
return_results(generic_ansible('vmwarev2', 'vmware_host', demisto.args()))
elif demisto.command() == 'vmware-host-acceptance':
return_results(generic_ansible('vmwarev2', 'vmware_host_acceptance', demisto.args()))
elif demisto.command() == 'vmware-host-active-directory':
return_results(generic_ansible('vmwarev2', 'vmware_host_active_directory', demisto.args()))
elif demisto.command() == 'vmware-host-capability-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_capability_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_manager', demisto.args()))
elif demisto.command() == 'vmware-host-datastore':
return_results(generic_ansible('vmwarev2', 'vmware_host_datastore', demisto.args()))
elif demisto.command() == 'vmware-host-dns-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_dns_info', demisto.args()))
elif demisto.command() == 'vmware-host-facts':
return_results(generic_ansible('vmwarev2', 'vmware_host_facts', demisto.args()))
elif demisto.command() == 'vmware-host-feature-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_feature_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_manager', demisto.args()))
elif demisto.command() == 'vmware-host-hyperthreading':
return_results(generic_ansible('vmwarev2', 'vmware_host_hyperthreading', demisto.args()))
elif demisto.command() == 'vmware-host-ipv6':
return_results(generic_ansible('vmwarev2', 'vmware_host_ipv6', demisto.args()))
elif demisto.command() == 'vmware-host-kernel-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_kernel_manager', demisto.args()))
elif demisto.command() == 'vmware-host-lockdown':
return_results(generic_ansible('vmwarev2', 'vmware_host_lockdown', demisto.args()))
elif demisto.command() == 'vmware-host-ntp':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp', demisto.args()))
elif demisto.command() == 'vmware-host-ntp-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp_info', demisto.args()))
elif demisto.command() == 'vmware-host-package-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_package_info', demisto.args()))
elif demisto.command() == 'vmware-host-powermgmt-policy':
return_results(generic_ansible('vmwarev2', 'vmware_host_powermgmt_policy', demisto.args()))
elif demisto.command() == 'vmware-host-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_host_powerstate', demisto.args()))
elif demisto.command() == 'vmware-host-scanhba':
return_results(generic_ansible('vmwarev2', 'vmware_host_scanhba', demisto.args()))
elif demisto.command() == 'vmware-host-service-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_info', demisto.args()))
elif demisto.command() == 'vmware-host-service-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_manager', demisto.args()))
elif demisto.command() == 'vmware-host-snmp':
return_results(generic_ansible('vmwarev2', 'vmware_host_snmp', demisto.args()))
elif demisto.command() == 'vmware-host-ssl-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ssl_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmhba-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmhba_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmnic-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmnic_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_manager', demisto.args()))
elif demisto.command() == 'vmware-local-user-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_info', demisto.args()))
elif demisto.command() == 'vmware-local-user-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_manager', demisto.args()))
elif demisto.command() == 'vmware-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-migrate-vmk':
return_results(generic_ansible('vmwarev2', 'vmware_migrate_vmk', demisto.args()))
elif demisto.command() == 'vmware-object-role-permission':
return_results(generic_ansible('vmwarev2', 'vmware_object_role_permission', demisto.args()))
elif demisto.command() == 'vmware-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup', demisto.args()))
elif demisto.command() == 'vmware-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-resource-pool':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool', demisto.args()))
elif demisto.command() == 'vmware-resource-pool-info':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool_info', demisto.args()))
elif demisto.command() == 'vmware-tag':
return_results(generic_ansible('vmwarev2', 'vmware_tag', demisto.args()))
elif demisto.command() == 'vmware-tag-info':
return_results(generic_ansible('vmwarev2', 'vmware_tag_info', demisto.args()))
elif demisto.command() == 'vmware-tag-manager':
return_results(generic_ansible('vmwarev2', 'vmware_tag_manager', demisto.args()))
elif demisto.command() == 'vmware-target-canonical-info':
return_results(generic_ansible('vmwarev2', 'vmware_target_canonical_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-settings':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_settings', demisto.args()))
elif demisto.command() == 'vmware-vcenter-statistics':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_statistics', demisto.args()))
elif demisto.command() == 'vmware-vm-host-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_host_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_info', demisto.args()))
elif demisto.command() == 'vmware-vm-shell':
return_results(generic_ansible('vmwarev2', 'vmware_vm_shell', demisto.args()))
elif demisto.command() == 'vmware-vm-storage-policy-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_storage_policy_info', demisto.args()))
elif demisto.command() == 'vmware-vm-vm-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vm_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-vss-dvs-migrate':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vss_dvs_migrate', demisto.args()))
elif demisto.command() == 'vmware-vmkernel':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-info':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_info', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-ip-config':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_ip_config', demisto.args()))
elif demisto.command() == 'vmware-vmotion':
return_results(generic_ansible('vmwarev2', 'vmware_vmotion', demisto.args()))
elif demisto.command() == 'vmware-vsan-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_vsan_cluster', demisto.args()))
elif demisto.command() == 'vmware-vspan-session':
return_results(generic_ansible('vmwarev2', 'vmware_vspan_session', demisto.args()))
elif demisto.command() == 'vmware-vswitch':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch', demisto.args()))
elif demisto.command() == 'vmware-vswitch-info':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch_info', demisto.args()))
elif demisto.command() == 'vmware-vsphere-file':
return_results(generic_ansible('vmwarev2', 'vsphere_file', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension':
return_results(generic_ansible('vmwarev2', 'vcenter_extension', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension-info':
return_results(generic_ansible('vmwarev2', 'vcenter_extension_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-folder':
return_results(generic_ansible('vmwarev2', 'vcenter_folder', demisto.args()))
elif demisto.command() == 'vmware-vcenter-license':
return_results(generic_ansible('vmwarev2', 'vcenter_license', demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
# ENTRY POINT
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 56.679245 | 109 | 0.656666 |
d6a5db705d8f686da0afa5b69e90350e26b17f99
| 20,538 |
py
|
Python
|
server/game_logic.py
|
JungleState/ksr_junglestate
|
99e86458b5056507bd36906f295bef11f2c734f5
|
[
"MIT"
] | 1 |
2022-01-25T07:53:12.000Z
|
2022-01-25T07:53:12.000Z
|
server/game_logic.py
|
JungleState/ksr_junglestate
|
99e86458b5056507bd36906f295bef11f2c734f5
|
[
"MIT"
] | 14 |
2022-01-03T13:19:11.000Z
|
2022-01-12T15:36:42.000Z
|
server/game_logic.py
|
JungleState/ksr_junglestate
|
99e86458b5056507bd36906f295bef11f2c734f5
|
[
"MIT"
] | null | null | null |
from random import randint
import functools
import logging
import threading
logging.getLogger().setLevel("DEBUG")
class Item:
def __init__(self, name, id):
self.name = name
self.id = id
def __str__(self) -> str:
return self.id
class Rules:
TIME_TO_MOVE = 0.5
SIGHT = 2
class Scores:
KNOCK_OUT = 50
HIT = 25
PINEAPPLE = 15
BANANA = 5
class Damage:
COCONUT = 1
FOREST = 1
PLAYER = 1
class Items:
EMPTY = Item("empty", " ")
FOREST = Item("forest", "FF")
COCONUT = Item("coconut", "CC")
BANANA = Item("banana", "BB")
PINEAPPLE = Item("pineapple", "PP")
class Player(Item):
def __init__(self, uuid, id, name):
self.id = id
self.uuid = uuid
self.knockouts = 0
self.hits = 0
self.x = 0
self.y = 0
self.knock_score = 0
# dimension of field of view matrix, needs to be odd
self.sight = Rules.SIGHT * 2 + 1
self.name = name
self.lives = 3
self.coconuts = 2
self.points = 0
self.state = 0 # 0 = alive ; 1 = dead
self.active = True
self.message = ""
def item_dict(self):
return {"coconuts": self.coconuts,
"id": self.id,
"knockouts": self.knockouts,
"hits": self.hits,
"name": self.name,
"lives": self.lives,
"points": self.points,
"active": self.active,
"knockScore": self.knock_score,
"message": self.message}
class MapGenerator:
""" A map generator that creates empty maps with forest all around."""
def generate(self, width, height):
matrix = []
for y in range(height):
row = []
matrix.append(row)
for x in range(width):
if y < Rules.SIGHT or y >= height - Rules.SIGHT or x < Rules.SIGHT or x >= width - Rules.SIGHT:
row.append(self.border())
else:
row.append(self.inner())
return matrix
def border(self):
return Items.FOREST
def inner(self):
return Items.EMPTY
class RandomGenerator(MapGenerator):
def __init__(self, forest_spawning_rate, coconut_rate, banana_rate, pinapple_rate):
self.forest_spawning_rate = forest_spawning_rate
self.coconut_rate = coconut_rate
self.banana_rate = banana_rate
self.pinapple_rate = pinapple_rate
def inner(self):
prob = randint(1, 100)
if prob <= self.forest_spawning_rate:
return Items.FOREST
elif prob <= self.forest_spawning_rate + self.coconut_rate:
return Items.COCONUT
elif prob <= self.forest_spawning_rate + self.coconut_rate + self.banana_rate:
return Items.BANANA
elif prob <= self.forest_spawning_rate + self.coconut_rate + self.banana_rate + self.pinapple_rate:
return Items.PINEAPPLE
else:
return super().inner()
def purge(self, matrix):
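        """Best-effort cleanup: randomly open forest tiles around any free
        cell that has more than two adjacent forest tiles, repeating until
        no such cell remains (keeps the map traversable)."""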
plus_list = [1, -1, 0, 0]
too_many_surrounding_obstacles = 1
while too_many_surrounding_obstacles > 0:
too_many_surrounding_obstacles = 0
for y in range(len(matrix)-Rules.SIGHT*2):
y += Rules.SIGHT
for x in range(len(matrix[y])-Rules.SIGHT*2):
x += Rules.SIGHT
if matrix[y][x] != Items.FOREST:
surrounding_obstacles = 0
for i in range(4):
if matrix[y+plus_list[i]][x+plus_list[-(i+1)]] == Items.FOREST:
surrounding_obstacles += 1
if surrounding_obstacles > 2:
too_many_surrounding_obstacles += 1
while surrounding_obstacles > 2:
index = randint(0, 3)
y_coord = y+plus_list[index]
x_coord = x+plus_list[-(index+1)]
if y_coord > Rules.SIGHT-1 and y_coord < len(matrix)-Rules.SIGHT and x_coord > Rules.SIGHT-1 and x_coord < len(matrix[y])-Rules.SIGHT and matrix[y_coord][x_coord] == Items.FOREST:
matrix[y_coord][x_coord] = super().inner()
surrounding_obstacles -= 1
return matrix
class Game:
def __init__(self, id, field_dimensions, name, generator=RandomGenerator(20, 1, 1, 1)):
self.password = ""
self.consoleText = []
self.id = id
self.state = 0
self.round = 0
self.move_list = []
self.player_list = []
self.safed_items_list = []
self.shooting = []
self.updated = False
self.serverName = name
(self.field_lengh, self.field_height) = field_dimensions
# field dimension 1st element = x; 2nd element = y
self.matrix = generator.purge(
generator.generate(self.field_lengh, self.field_height))
self.field_dim = [self.field_lengh, self.field_height]
self.fullRoundEvent = threading.Event()
self.stopEvent = threading.Event()
self.roundMakerThread = threading.Thread(
target=self.blockUntilNextRound, name="roundMaker", daemon=True)
self.roundMakerThread.start()
def dispose(self):
self.stopEvent.set()
def getId(self):
pass
def join(self, name, uuid):
        # Create the player (its id is the uuid) and place it on a random empty cell
logging.debug(f"Player {uuid} joined as {name}")
player = Player(uuid, uuid, name)
while True:
x = randint(1, self.field_dim[0]-Rules.SIGHT)
y = randint(1, self.field_dim[1]-Rules.SIGHT)
if self.getElementAt(x, y) == Items.EMPTY:
player.x = x
player.y = y
self.setElementAt(x, y, player)
self.player_list.append(player)
break
def SerializeMatrix(self):
rows = []
for row in self.matrix:
rows.append("".join([self.SerializeItem(item) for item in row]))
return rows
def SerializeItem(self, item):
if isinstance(item, Player):
return f"{self.player_list.index(item):02d}"
return str(item)
def getPlayerFromID(self, player_id):
for player in self.player_list:
if player.id == player_id:
return player
return False
def kickPlayer(self, player_name):
"""gets player name, kicks player"""
for player in self.player_list:
if player.name == player_name:
coconut_in_this_cell = False
for safed_item in self.safed_items_list:
if [player.x, player.y] == safed_item[1]:
coconut_in_this_cell = True
self.setElementAt(player.x, player.y, Items.COCONUT)
del self.safed_items_list[self.safed_items_list.index(
safed_item)]
break
if not coconut_in_this_cell:
self.setElementAt(player.x, player.y, Items.EMPTY)
del self.player_list[self.player_list.index(player)]
def addMove(self, player_id, move_id, dir):
"""move_id list:
0: Stay
1: Move
2: Shoot
dir list:
-1: No direction
0: up
1: up right
2: right
3: down right
4: down
5: down left
6: left
7: up left"""
if len(self.move_list) == 0:
timer = threading.Timer(Rules.TIME_TO_MOVE, functools.partial(
self.kickOffRound, self.round))
timer.start()
for move in self.move_list:
if move[0] == player_id:
self.move_list[self.move_list.index(move)] = [
player_id, move_id, dir]
return True
if self.getPlayerFromID(player_id).state == 1:
logging.debug(
f"Rejecting move from Player {player_id} who is knocked out.")
return False
logging.debug(f"Adding move from {player_id}.")
self.move_list.append([player_id, move_id, dir])
alive = 0
for player in self.player_list:
if player.state == 0:
alive += 1
if len(self.move_list) == alive:
logging.debug("All players moved - next round!")
self.fullRoundEvent.set()
return True
def GetPlayerListForJSON(self):
player_list = []
for player in self.player_list:
player_list.append({"id": player.id,
"name": player.name,
"health": player.lives,
"knockouts": player.knockouts,
"hits": player.hits,
# coconuts
"coconuts": player.coconuts,
"points": player.points})
return player_list
def kickOffRound(self, round):
if round == self.round:
logging.debug(
"Next round after timeout - not all players have moved!")
self.fullRoundEvent.set()
def blockUntilNextRound(self):
while self.fullRoundEvent.wait():
self.fullRoundEvent.clear()
if self.stopEvent.is_set():
return
self.doNextRound()
def doNextRound(self):
move_list = list(self.move_list)
self.updated = True
self.move_list.clear()
for move in move_list: # check for moving
if move[1] == 1:
player = self.getPlayerFromID(move[0])
self.executeMoving(player, move[2])
for move in move_list: # check for shooting
if move[1] == 2:
player = self.getPlayerFromID(move[0])
self.executeShooting(player, move[2])
for player in self.player_list:
if player.lives <= 0 and player.state == 0:
player.state = 1
self.setElementAt(player.x, player.y, Items.EMPTY)
for safed_item in self.safed_items_list:
            if safed_item[2] != self.round:  # item is from a previous round
if self.getElementAtCoords(safed_item[1]) == Items.EMPTY:
self.setElementAtCoords(safed_item[1], safed_item[0])
elif isinstance(self.getElementAtCoords(safed_item[1]), Player) == False:
del self.safed_items_list[self.safed_items_list.index(
safed_item)]
self.round += 1
def spawnItem(self, item):
while True:
x = randint(1, self.field_dim[0]-Rules.SIGHT)
y = randint(1, self.field_dim[1]-Rules.SIGHT)
if self.getElementAt(x, y) == Items.EMPTY:
self.setElementAt(x, y, item)
break
def getElementAt(self, x, y):
return self.matrix[y][x]
def getElementAtCoords(self, coords):
return self.getElementAt(coords[0], coords[1])
def setElementAt(self, x, y, item):
self.matrix[y][x] = item
def setElementAtCoords(self, coords, item):
self.setElementAt(coords[0], coords[1], item)
def executeMoving(self, player, dir):
logging.debug(f"Moving player {player.id} in direction {dir}!")
toCoordinates = [player.x, player.y]
if dir == 0:
toCoordinates[1] -= 1
elif dir == 2:
toCoordinates[0] += 1
elif dir == 4:
toCoordinates[1] += 1
elif dir == 6:
toCoordinates[0] -= 1
checkField = self.getElementAtCoords(toCoordinates)
if checkField == Items.EMPTY: # empty field
self.setElementAt(player.x, player.y, Items.EMPTY)
self.setElementAtCoords(toCoordinates, player)
player.x, player.y = toCoordinates[0], toCoordinates[1]
elif checkField == Items.FOREST: # forest field
self.handlePlayerDamage(player, Rules.Damage.FOREST)
elif isinstance(checkField, Player):
self.handlePlayerDamage(player, Rules.Damage.PLAYER)
player2 = checkField
self.handlePlayerDamage(player2, Rules.Damage.PLAYER)
elif isinstance(checkField, Item):
item_picked_up = False
if checkField == Items.PINEAPPLE:
self.handleScore(player, Rules.Scores.PINEAPPLE)
item_picked_up = True
elif checkField == Items.BANANA:
if player.lives < 3:
player.lives += 1
else:
self.handleScore(player, Rules.Scores.BANANA)
item_picked_up = True
elif checkField == Items.COCONUT:
if player.coconuts < 3:
item_picked_up = True
player.coconuts += 1
for safed_item in self.safed_items_list:
if safed_item[1] == toCoordinates:
index = self.safed_items_list.index(safed_item)
del self.safed_items_list[index]
break
else:
safed_item_in_safed_items_list = False
for safed_item in self.safed_items_list:
if safed_item[0] == toCoordinates:
safed_item_in_safed_items_list = True
if not safed_item_in_safed_items_list:
self.safed_items_list.append(
(Items.COCONUT, toCoordinates, self.round))
if checkField != Items.FOREST: # empty field
self.setElementAt(player.x, player.y, Items.EMPTY)
self.setElementAtCoords(toCoordinates, player)
player.x, player.y = toCoordinates[0], toCoordinates[1]
if item_picked_up:
self.spawnItem(checkField)
def handlePlayerDamage(self, player, damage=1):
"""Inflicts damage on the given player and returns True if the player is knocked out."""
logging.debug(f'Player {player.uuid} is hurting {damage}')
player.lives -= damage
if player.lives < 1:
logging.debug(f'Player {player.uuid} is knocked out - sleep well!')
player.state = 1
self.setElementAt(player.x, player.y, Items.EMPTY)
self.consoleText.append(f"{player.name} is knocked out!")
self.clearConsole()
return True
return False
def handleScore(self, player, score=0):
"""Changes the given player's score."""
logging.debug(f'Player {player.uuid} scored {score}')
player.points += score
def formatShooting(self):
shotJson = {"shots": []}
for shot in self.shooting:
shotJson["shots"].append({
"id": self.player_list.index(shot[0]),
"coords": shot[1]
})
self.shooting = []
return shotJson
def executeShooting(self, player, dir):
if player.coconuts > 0:
logging.debug(f"Shooting player {player.id} in direction {dir}!")
toCoordinates = [player.x, player.y]
if dir == 0:
toCoordinates[1] -= 1
elif dir == 1:
toCoordinates[0] += 1
toCoordinates[1] -= 1
elif dir == 2:
toCoordinates[0] += 1
elif dir == 3:
toCoordinates[0] += 1
toCoordinates[1] += 1
elif dir == 4:
toCoordinates[1] += 1
elif dir == 5:
toCoordinates[1] += 1
toCoordinates[0] -= 1
elif dir == 6:
toCoordinates[0] -= 1
elif dir == 7:
toCoordinates[1] -= 1
toCoordinates[0] -= 1
self.shooting.append((player, dir))
checkField = self.getElementAtCoords(toCoordinates)
if isinstance(checkField, Player): # player field
player2 = checkField
logging.debug(f'Player {player2.uuid} hit')
if self.handlePlayerDamage(player2, Rules.Damage.COCONUT):
self.handleScore(player, Rules.Scores.KNOCK_OUT)
player.knock_score += 1
self.consoleText.append(f"{player.name} knocked out {player2.name}!")
self.clearConsole() # Clear old messages
else:
self.handleScore(player, Rules.Scores.HIT)
player.coconuts -= 1
for safed_item in self.safed_items_list:
if safed_item[1] == [player.x, player.y]:
player.coconuts += 1
self.spawnItem(Items.COCONUT)
del self.safed_items_list[self.safed_items_list.index(
safed_item)]
break
def clearConsole(self):
if len(self.consoleText) == 8:
self.consoleText.pop(0)
def getFOV(self, player):
field_of_view_matrix = []
sight_x = player.sight
sight_y = player.sight
point_of_player_in_sight_matrix = [
int(player.sight/2), int(player.sight/2)]
# makes matrix
for y in range(sight_y):
field_of_view_matrix.append("")
for x in range(sight_x):
final_y = y+player.y-point_of_player_in_sight_matrix[1]
final_x = x+player.x-point_of_player_in_sight_matrix[0]
field_of_view_matrix[y] += self.SerializeItem(
self.getElementAt(final_x, final_y))
return field_of_view_matrix
def GetFieldOfView(self, player_id): # for specific player
for player in self.player_list:
if player.id == player_id:
return self.getFOV(player)
def GetPlayerVar(self, player_id, item): # for specific player
for player in self.player_list:
if player.id == player_id:
if item == "CC":
return player.coconuts
elif item == "P":
return player.points
elif item == "lives":
return player.lives
elif item == "state":
return player.state
def GetPlayers(self):
return {self.player_list.index(player): player.name for player in self.player_list}
def Scoreboard(self, sortby, hyrarchy):
sorted_player_list = [i for i in range(len(self.player_list))]
item_list_dict = {"coconuts": [player.coconuts for player in self.player_list],
"lives": [player.lives for player in self.player_list],
"points": [player.points for player in self.player_list],
"knockouts": [player.knockouts for player in self.player_list],
"hits": [player.hits for player in self.player_list],
"name": [player.name[0] for player in self.player_list],
"message": [player.message for player in self.player_list],
"active": [player.active for player in self.player_list],
"knockScore": [player.knock_score for player in self.player_list]
}
sorted_list = sorted(item_list_dict[f"{sortby}"])
item_list = item_list_dict[f"{sortby}"]
for i in range(len(item_list)):
plus_index = 0
while isinstance(sorted_player_list[sorted_list.index(item_list[i]) + plus_index], Player):
plus_index += 1
sorted_player_list[sorted_list.index(
item_list[i]) + plus_index] = self.player_list[i]
sorted_player_id_list = [player.item_dict()
for player in sorted_player_list]
if hyrarchy == "decr":
if sortby != "name":
sorted_player_id_list.reverse()
elif hyrarchy == "incr":
if sortby == "name":
sorted_player_id_list.reverse()
return sorted_player_id_list
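# Direction encoding used by executeMoving/executeShooting (inferred from the
# coordinate math above): 0 = up, 2 = right, 4 = down, 6 = left; the odd
# values 1, 3, 5 and 7 are the diagonals in between and are only handled for shots.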
| 36.806452 | 207 | 0.539634 |
242ab624f6041e0f0ff0c58d127b511bf351b7a3
| 2,277 |
py
|
Python
|
official/audio/melgan/ascend310_infer/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/audio/melgan/ascend310_infer/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/audio/melgan/ascend310_infer/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""310 data_processing"""
import os
import argparse
import numpy as np
from scipy.io.wavfile import write
parser = argparse.ArgumentParser(description='MelGAN')
parser.add_argument('--wav_path', type=str, default='', help='wav data path')
parser.add_argument('--bin_path', type=str, default='', help='bin data path')
parser.add_argument('--sample', type=int, default=22050, help='wav sample')
parser.add_argument('--mode', type=int, choices=[1, 2], default=1,
help='1 for wav to bin, 2 for bin to wav (Default: 1)')
args_opt = parser.parse_args()
if args_opt.mode == 1:
path_all = args_opt.wav_path
if not os.path.exists(args_opt.bin_path):
os.mkdir(args_opt.bin_path)
else:
path_all = args_opt.bin_path
if not os.path.exists(args_opt.wav_path):
os.mkdir(args_opt.wav_path)
filenames = os.listdir(path_all)
for filename in filenames:
if args_opt.mode == 1:
new_name = os.path.join(args_opt.bin_path, filename[:-4]+'.bin')
        temp = np.load(os.path.join(path_all, filename))
temp = (temp + 5) / 5
if temp.shape[1] < 240:
temp_1 = 240 - temp.shape[1]
temp = np.pad(temp, ((0, 0), (0, temp_1)), mode='constant', constant_values=0.0)
temp[:, :240].tofile(new_name)
else:
abc = np.fromfile(os.path.join(path_all, filename), dtype='float32')
wav_data = 32768.0 * abc
output_path = os.path.join(args_opt.wav_path, filename).replace('.bin', '.wav')
write(output_path, args_opt.sample, wav_data.astype('int16'))
print('get {}, please check it'.format(output_path))
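# Invocation sketch (paths are hypothetical): mode 1 turns saved mel
# spectrograms (loaded with np.load) into fixed-width .bin inputs for
# Ascend 310 inference; mode 2 turns inference output .bin files back
# into 16-bit wavs.
#   python preprocess.py --mode 1 --wav_path ./mels --bin_path ./bins
#   python preprocess.py --mode 2 --bin_path ./out_bins --wav_path ./wavs --sample 22050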
| 42.166667 | 92 | 0.658762 |
cc6957a197b7f3640abef490156842f956195524
| 1,090 |
py
|
Python
|
src/onegov/ballot/models/election/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/ballot/models/election/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/ballot/models/election/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.ballot.models.election.association import ElectionAssociation
from onegov.ballot.models.election.candidate import Candidate
from onegov.ballot.models.election.candidate_result import CandidateResult
from onegov.ballot.models.election.election import Election
from onegov.ballot.models.election.election_compound import ElectionCompound
from onegov.ballot.models.election.election_result import ElectionResult
from onegov.ballot.models.election.list import List
from onegov.ballot.models.election.list_connection import ListConnection
from onegov.ballot.models.election.list_result import ListResult
from onegov.ballot.models.election.panachage_result import PanachageResult
from onegov.ballot.models.election.party_result import PartyResult
from onegov.ballot.models.election.proporz_election import ProporzElection
__all__ = [
'Candidate',
'CandidateResult',
'Election',
'ElectionAssociation',
'ElectionCompound',
'ElectionResult',
'List',
'ListConnection',
'ListResult',
'PanachageResult',
'PartyResult',
'ProporzElection',
]
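# Usage sketch: downstream code imports the election models from this package
# root, e.g. `from onegov.ballot.models.election import Election, Candidate`.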
| 37.586207 | 76 | 0.815596 |
cca977640d2f6e18b7bb9a3f372ec4db7db02e25
| 2,621 |
py
|
Python
|
datastories_semeval2017_task4/embeddings/WordVectorsManager.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | 1 |
2020-06-04T13:20:45.000Z
|
2020-06-04T13:20:45.000Z
|
datastories_semeval2017_task4/embeddings/WordVectorsManager.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | 6 |
2020-06-03T18:45:11.000Z
|
2022-02-10T01:51:03.000Z
|
datastories_semeval2017_task4/embeddings/WordVectorsManager.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | null | null | null |
import errno
import os
import pickle
import numpy
from utilities_nn.ResourceManager import ResourceManager
class WordVectorsManager(ResourceManager):
def __init__(self, corpus=None, dim=None, omit_non_english=False):
super().__init__()
self.omit_non_english = omit_non_english
self.wv_filename = "{}.{}d".format(corpus, str(dim))
self.parsed_filename = "{}.{}d.pickle".format(corpus, str(dim))
def is_ascii(self, text):
try:
text.encode('ascii')
return True
        except UnicodeEncodeError:
            return False
def write(self):
_word_vector_file = os.path.join(os.path.dirname(__file__), self.wv_filename) # return file directory
if os.path.exists(_word_vector_file):
print('Indexing file {} ...'.format(self.wv_filename))
embeddings_dict = {}
with open(_word_vector_file, "r", encoding="utf-8") as file:
for i, line in enumerate(file):
if line.strip() != "" or line != "\n": # or len(line) > 0
values = line.split()
word = values[0]
coefs = numpy.asarray(values[1:], dtype='float32')
if word.lower() in {'<unk>', "<unknown>"}:
print(word)
print("UNKNOWN")
print()
if self.omit_non_english and not self.is_ascii(word):
continue
                        if word not in embeddings_dict:  # keep the first occurrence
embeddings_dict[word] = coefs
# 'House': array([0.174788, 0.091168, -0.317676,...])
print('Found %s word vectors.' % len(embeddings_dict))
# save Embeddings into a pickle-File
with open(os.path.join(os.path.dirname(__file__), self.parsed_filename), 'wb') as pickle_file:
pickle.dump(embeddings_dict, pickle_file)
else:
print("{} not found!".format(_word_vector_file))
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), _word_vector_file)
# load pickle file
def read(self):
_parsed_file = os.path.join(os.path.dirname(__file__), self.parsed_filename)
if os.path.exists(_parsed_file): # pickle file for Embeddings available
with open(_parsed_file, 'rb') as f:
return pickle.load(f)
else: # no pickle file for Embeddings available
self.write()
return self.read()
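# A minimal usage sketch (the corpus/dimension values are assumptions; the
# manager expects a matching "<corpus>.<dim>d" vector file in this directory):
# wv = WordVectorsManager(corpus="datastories.twitter", dim=300)
# embeddings = wv.read()  # dict mapping each word to its numpy vector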
| 40.323077 | 109 | 0.54979 |
d1d5c4248dc98ceb78d1d3d12f2b463f38fb1d21
| 899 |
py
|
Python
|
IVTa/2014/BURMISTROV_V_D/task4_45.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/BURMISTROV_V_D/task4_45.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/BURMISTROV_V_D/task4_45.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 4. Variant 45.
# Write a program that prints the name under which Boris Nikolayevich Kampov is known. Additionally, print the person's field of interest, place of birth, and years of birth and death (if deceased), and compute the age at the present moment (or at the moment of death). All required data must be stored in variables. After printing the information, the program must wait for the user to press Enter before exiting.
# Burmistrov V.D.
# 17.03.2016
print("Boris Nikolayevich Kampov is better known as a Russian journalist.")
year_of_birth = 1908
age = 1981 - year_of_birth
birthplace = "Moscow, Russian Empire"
interess = "journalist"
print("Place of birth:", birthplace)
print("Year of birth:", year_of_birth)
print("Age at death: ", age)
print("Field of interest: ", interess)
input("\n\nPress Enter to exit.")
| 47.315789 | 445 | 0.775306 |
ae5e5a697b06aaa04f673eeb7ff7efe22a339346
| 197 |
py
|
Python
|
exercises/de/solution_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/de/solution_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/de/solution_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.de import German
nlp = German()
people = ["David Bowie", "Angela Merkel", "Lady Gaga"]
# Create a list of patterns for the PhraseMatcher
patterns = list(nlp.pipe(people))
| 21.888889 | 56 | 0.730964 |
ee0bdabfe7177e17c0250471fb986fa7c87bf387
| 449 |
py
|
Python
|
pacman-arch/test/pacman/tests/sync893.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync893.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync893.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "conflict 'db vs db'"
sp1 = pmpkg("pkg1", "1.0-2")
sp1.conflicts = ["pkg2"]
self.addpkg2db("sync", sp1);
sp2 = pmpkg("pkg2", "1.0-2")
self.addpkg2db("sync", sp2)
lp1 = pmpkg("pkg1")
self.addpkg2db("local", lp1)
lp2 = pmpkg("pkg2")
self.addpkg2db("local", lp2)
self.args = "-S %s --ask=4" % " ".join([p.name for p in (sp1, sp2)])
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
| 21.380952 | 68 | 0.64588 |
c9d1d1114e0bf8d6bd40c016bd8fc62824b2c634
| 640 |
py
|
Python
|
ICHSA/2021/crypto/Baby_Homework/best_crypto_service.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
ICHSA/2021/crypto/Baby_Homework/best_crypto_service.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
ICHSA/2021/crypto/Baby_Homework/best_crypto_service.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python2
from Crypto.Cipher.AES import AESCipher
import os, random, binascii
from sys import argv
import string
import sys
def padding(data):
pad_size = 16 - (len(data) % 16)
data = data + "".join([random.choice(string.printable) for _ in range(pad_size)])
return data
def encrypt(data):
return AESCipher(os.environ.get('KEY')).encrypt(padding(data))
def main():
print "Hello! What do you want to encrypt today?"
sys.stdout.flush()
user_input = raw_input()
print binascii.hexlify(encrypt(user_input + os.environ.get('FLAG')))
sys.exit()
if __name__ == '__main__':
main()
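# Interaction sketch: PyCrypto's AESCipher defaults to ECB mode, so identical
# 16-byte plaintext blocks of input+FLAG encrypt to identical ciphertext
# blocks -- the classic setup for byte-at-a-time recovery of the flag.
# echo "AAAAAAAAAAAAAAAA" | python2 best_crypto_service.py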
| 24.615385 | 86 | 0.678125 |
4e44df4dba1220622395129688ddb6a7ecd10074
| 13,501 |
py
|
Python
|
premium-main/setting.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
premium-main/setting.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
premium-main/setting.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
###########################################################################
# TEAM    : ZEE K WORLD, MR.RISKY, WANSXGANS                              #
# SOCIALS :                                                               #
# - Facebook : https://m.facebook.com/llovexnxx                           #
# - WhatsApp : https://wa.me/6283143565470                                #
# - TeleGram : 6283143565470                                              #
# - Github   : https://github.com/Dumai-991                               #
# INFO    :                                                               #
# - Hello recoder kings, feel free to edit, just don't overdo it, okay... #
# - Do not sell this script :( doing so only marks you as a noob          #
###########################################################################
me = ("Mr.Risky")
no_me = ("6283143565470")
email_me = ("[email protected]")
facebook_me = ("Https://M.Facebook.Com/llovexnxx")
github_me = ("Https://Github.Com/Dumai-991")
team = ("ZEE K WORLD,MR.RISKY,WANSXGANS")
url="https://free.facebook.com"
useragent="Mozilla/5.0 (Linux; Android 5.1; PICOphone_M4U_M2_M Build/LMY47D; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.121 Mobile Safari/537.36"
newpass="Tiktok"
server01="NN"
# Colors for the copyright banner
#p = "\x1b[0;37m" # white
#"""
q="\033[00m"
h2="\033[40m"
b2="\033[44m"
c2="\033[46m"
i2="\033[42m"
u2="\033[45m"
m2="\033[41m"
p2="\033[47m"
k2="\033[43m"
b='\033[1;94m'
i='\033[0;92m'
c='\033[0;96m'
m='\033[0;91m'
u='\033[0;95m'
k='\033[0;93m'
p='\033[0;97m'
h='\033[0;90m'
p = "\x1b[0;33m" # putih
m = "\x1b[0;31m" # merah
h = "\x1b[0;32m" # hijau
k = "\033[0;36m" # kuning
b = "\x1b[0;34m" # biru
u = "\x1b[0;35m" # ungu
o = "\033[0;32m" # biru muda
#""" # WARNA ASLI
"""h2="\033[40m"
b2="\033[44m"
c2="\033[46m"
i2="\033[42m"
u2="\033[45m"
m2="\033[41m"
p2="\033[47m"
k2="\033[43m"
"""
#"""
B='\033[0;94m'
I='\033[0;92m'
C='\033[0;96m'
M='\033[0;91m'
U='\033[0;95m'
K='\033[0;93m'
P='\033[0;97m'
H='\033[0;90m'
Q="\033[00m"
b='\033[0;94m'
i='\033[0;92m'
c='\033[0;92m'
m='\033[0;91m'
u='\033[0;95m'
k='\033[0;96m'
p='\033[0;93m'
h='\033[0;90m'
q="\033[00m"
#"""
"""
h='\033[0;90m'
p = "\x1b[0;97m" # white
m = "\x1b[0;91m" # red
i = "\x1b[0;92m" # green
k = "\x1b[0;93m" # yellow
b = "\x1b[0;94m" # blue
u = "\x1b[0;95m" # purple
c = "\x1b[0;96m" # light blue
q = "\033[0m"   # reset (colors off)
H='\033[0;90m'
P = "\x1b[0;97m" # white
M = "\x1b[0;91m" # red
I = "\x1b[0;92m" # green
K = "\x1b[0;93m" # yellow
B = "\x1b[0;94m" # blue
U = "\x1b[0;95m" # purple
C = "\x1b[0;96m" # light blue
Q = "\033[0m"   # reset (colors off)
"""
m3=(q+m2)
#p = "\x1b[0;33m" # putih
#m = "\x1b[0;31m" # merah
#h = "\x1b[0;32m" # hijau
#k = "\033[0;36m" # kuning
#b = "\x1b[0;34m" # biru
#u = "\x1b[0;35m" # ungu
#o = "\033[0;32m" # biru muda
# These colors are already broken
bulat=(k+"["+p+"•"+k+"] "+p) # [•]
war=(k+"["+p+"!"+k+"] "+p) # [!]
inp=(k+"["+p+"?"+k+"] "+p) # [?]
bulat2=(k+"["+p+"••"+k+"] "+p) # [••]
war2=(k+"["+p+"!!"+k+"] "+p) # [!!]
inp2=(k+"["+p+"??"+k+"] "+p) # [??]
bulat=(k+"["+p+"•"+k+"] "+p) # [•]
war=(k+"["+p+"!"+k+"] "+p) # [!]
inp=(k+"["+p+"?"+k+"] "+p) # [?]
bulat2=(k+"["+p+"••"+k+"] "+p) # [••]
war2=(k+"["+p+"!!"+k+"] "+p) # [!!]
inp2=(k+"["+p+"??"+k+"] "+p) # [??]
garis=(k+'['+i+'+'+k+']'+p+'========================================================='+k+'['+i+'+'+k+']')
import os
import sys
import time
import datetime
import random
import hashlib
import re
import threading
import json
import urllib
try: import requests
except ModuleNotFoundError: os.system("python -m pip install requests &> /dev/null")
try: import bs4
except ModuleNotFoundError: os.system("python -m pip install bs4 &> /dev/null")
try: import mechanize
except ModuleNotFoundError: os.system("python -m pip install mechanize &> /dev/null")
import requests as req
#import os,sys,time,mechanize,itertools,datetime,random,hashlib,re,threading,json,getpass,urllib,urlopen
#from multiprocessing.pool import ThreadPool
try:
import requests
except ImportError:
print ('[×] Modul requests belum terinstall!...\n')
os.system('pip install requests' if os.name == 'nt' else 'pip2 install requests')
import requests
import uuid
import ipaddress
import calendar
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup as par
from time import sleep
from datetime import datetime
from datetime import date
import requests,mechanize,bs4,sys,os,subprocess,uuid
import requests,sys,random,time,re,base64,json
import os, re, requests, concurrent.futures
from random import randint
from concurrent.futures import ThreadPoolExecutor as ThreadPool
from datetime import date
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup as parser
import requests as r, re, os
from bs4 import BeautifulSoup as par
import platform
import requests, bs4, sys, os, subprocess, random, time, re, json
import concurrent.futures
from datetime import datetime
from time import sleep
from requests import Session
import re, sys
import sys
from os import system
import os, sys, time, random
from sys import exit as keluar
from time import sleep as waktu
from random import random as acak
from random import choice as pilih
from sys import stdout
from os import system
import re
import os,random,time,sys
import json
from time import sleep as waktu
from bs4 import BeautifulSoup as parser
current = datetime.now()
import requests,mechanize,bs4,sys,os,subprocess,uuid,random,time,re,base64,concurrent.futures,json
koneksi_error=(req.exceptions.ConnectionError,req.exceptions.ChunkedEncodingError,req.exceptions.ReadTimeout)
#### DAPUNTA
import requests,bs4,sys,os,random,time,re,json,uuid,subprocess
from random import randint
from concurrent.futures import ThreadPoolExecutor as ThreadPool
from bs4 import BeautifulSoup as par
from datetime import date
from datetime import datetime
from urllib.parse import quote
#### ZEE K world
import requests, sys, bs4, os, random, time, re, json
from concurrent.futures import ThreadPoolExecutor as zthreads
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup as parser
from datetime import datetime
from time import sleep
### ANGGA
try:
import concurrent.futures
except ImportError:
print("\n [!] \033[0;91mmodule futures belum terinstall\033[0;97m")
os.system("pip install futures")
import os
import sys
import time
import requests
import random
from concurrent.futures import ThreadPoolExecutor
# Keep your distance, friend :v
ua = {"user-agent":"chrome"}
prvt = []
ses = r.Session()
link = "https://free.facebook.com/"
r=requests.Session()
# (crude aside) :p
"""N="\033[00m"
q="\033[00m"
h2="\033[40m"
b2="\033[44m"
c2="\033[46m"
i2="\033[42m"
u2="\033[45m"
m2="\033[41m"
p2="\033[47m"
k2="\033[43m"
b='\033[0;34m'
i='\033[0;32m'
c='\033[0;36m'
m='\033[0;31m'
u='\033[0;35m'
k='\033[0;33m'
p='\033[0;37m'
h='\033[0;90m'
b='\033[0;34m'
I='\033[0;32m'
C='\033[0;36m'
M='\033[0;31m'
U='\033[0;35m'
K='\033[0;33m'
P='\033[0;37m'
H='\033[0;90m'
Q="\033[00m"""
warna_me=([i, c, m, u, k, p, h, b])
warna_ms=([i, c, m, u, k, p, h, b])
w2=(warna_me)
w=random.choice(w2)
try:
z = 50
x = 0
except (KeyError, IOError):
exit(war+"BUG !!")
try:
kntlx=([i, c, m, u, k, p, h, b])
for w3 in kntlx:
continue
except KeyError:
w3 = ("")
host="https://mbasic.facebook.com"
ips=None
try:
ipx=requests.get("http://ip-api.com/json/").json()["query"]
ips=requests.get("http://ip-api.com/json/"+ipx,headers={"Referer":"http://ip-api.com/","Content-Type":"application/json; charset=utf-8","User-Agent":"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]"}).json()["country"].lower()
ipp = requests.get("https://api.ipify.org").text
country=requests.get("http://ip-api.com/json/").json()["country"]
except:
ips=None
ipx=("NONE")
def logo():
banner()
def banner():
logo_v2=(f"""
____ _ \n / __ \ _____ ___ ____ ___ (_) __ __ ____ ___ \n / /_/ / / ___/ / _ \ / __ `__ \ / / / / / / / __ `__ \ \n / ____/ / / / __/ / / / / / / / / / /_/ / / / / / / /\n/_/ /_/ \___/ /_/ /_/ /_/ /_/ \__,_/ /_/ /_/ /_/
{bulat}Author : {i}{me}
{bulat}Github : {i}{github_me}
{bulat}Whatsapp : {i}https://wa.me/{no_me}
{bulat}Facebook : {i}https://fb.me/llovenxx
{bulat}Your IP : {i}{ipx}
{war}{q}Thanks To : {i}{team}\n{garis}""")
logo_v1=(f"""
________ _____ \n___ __ \_____________ _______ ___ ___(_)____ _________ ___ \n__ /_/ /__ ___/_ _ \__ __ `__ \__ / _ / / /__ __ `__ \ \n_ ____/ _ / / __/_ / / / / /_ / / /_/ / _ / / / / /\n/_/ /_/ \___/ /_/ /_/ /_/ /_/ \__,_/ /_/ /_/ /_/
{bulat}Author : {i}{me}
{bulat}Github : {i}{github_me}
{bulat}Whatsapp : {i}https://wa.me/{no_me}
{bulat}Facebook : {i}https://fb.me/llovenxx
{bulat}Your IP : {i}{ipx}
{war}{q}Thanks To : {i}{team}\n{garis}""")
rizky = ([logo_v1, logo_v2])
ganteng = pilih(rizky)
print(w3+ganteng)
def kntl():
jjja = ([Q, C, I, B, U, M, K, H])
warn = (jjja)
# random.choice(warn)
# "< L O A D I N G >"
_1_ = ("%s<"%(C))
_2_ = ("%s< "%(C))
_3_ = ("%s< %sL"%(C,random.choice(warn)))
_4_ = ("%s< %sL "%(C,random.choice(warn)))
_5_ = ("%s< %sL %sO"%(C,random.choice(warn),random.choice(warn)))
_6_ = ("%s< %sL %sO "%(C,random.choice(warn),random.choice(warn)))
_7_ = ("%s< %sL %sO %sA"%(C,random.choice(warn),random.choice(warn),random.choice(warn)))
_8_ = ("%s< %sL %sO %sA "%(C,random.choice(warn),random.choice(warn),random.choice(warn)))
_9_ = ("%s< %sL %sO %sA %sD"%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_10_ =("%s< %sL %sO %sA %sD "%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_11_ =("%s< %sL %sO %sA %sD %sI"%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_12_ =("%s< %sL %sO %sA %sD %sI "%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_13_ =("%s< %sL %sO %sA %sD %sI %sN"%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_14_ =("%s< %sL %sO %sA %sD %sI %sN "%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_15_ =("%s< %sL %sO %sA %sD %sI %sN %sG"%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_16_ =("%s< %sL %sO %sA %sD %sI %sN %sG "%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn)))
_17_ =("%s< %sL %sO %sA %sD %sI %sN %sG %s>"%(C,random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),C))
_99_ = ([_1_, _2_, _3_, _4_, _5_, _6_, _7_, _8_, _9_, _10_, _11_, _12_, _13_, _14_, _15_, _16_, _17_, _17_, _16_, _15_, _14_, _13_, _12_, _11_, _10_, _9_, _8_, _7_, _6_, _5_, _4_, _3_, _2_, _1_])
for x in _99_:
stdout.write("\r%s[%s+%s]%s=%s=%s=%s=%s⟩%s⟩ %s"%(random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),random.choice(warn),x))
stdout.flush(),;time.sleep(0.30)
def logo_mt():
#### Hey there :-/
os.system("git pull")
dmi_1 = (f"""{c} __ ___ _ __
/ |/ / ___ _ (_) ___ / /_ ___ ___ ___ _ ___ ____ ___
/ /|_/ / / _ `/ / / / _ \/ __// -_) / _ \/ _ `/ / _ \/ __// -_)
/_/ /_/ \_,_/ /_/ /_//_/\__/ \__/ /_//_/\_,_/ /_//_/\__/ \__/
{bulat}Under maintenance, please try again later
{bulat}POWER OF DUMAI-991
{war}HUB : 6283143565470""")
print(dmi_1)
TOOKKUKIS = ("2107717763:AAG6xvFgYP6nQnnK0QM2eKoUi4gZ-MdVu7c")
TOOK = ("2141841952:AAG6cVAUG2YHDYspoh5l8qvW2VXfI-x-FvA")
#IDTT = ("@tesskalau")
IDTT = ("1570566370")
IDTTT = ("2138644537")
def logo_exp():
dmi_2 = (f"""{c} _______ _ _ _____ _____ ______ _______ ______ \n |______ \___/ |_____] | |_____/ |______ | \ \n |______ _/ \_ | __|__ | \_ |______ |_____/
{war}Sorry, this script has expired. Please contact the admin !!
{war}Whatsapp : {k}{no_me}
{war}Facebook : {k}{facebook_me}""")
print(dmi_2)
#### Hey, recoder kids :)
KNTL = ([U, C, K, b, I])
bo = []
def b():
global bo
asww=requests.get("https://free.facebook.com/KM39453/posts/1714009362122228")
aq = asww.text
h_tkn=(str(re.findall("(EA\w+)",aq)))
# bo.append(h_tkn)
# idq = open('token', 'w')
# idq.write("("+h_tkn+")")
# bh = aq.split("")
naa = ("(%s)"%(h_tkn))
for ha in naa:
print (naa)
try:
otw = requests.get("https://graph.facebook.com/me/?access_token="+ha)
a = json.loads(otw.text)
nama = a["name"]
print(Q+"NAMA >>>>>> "+nama)
except Exception as e:
print("Error : %s"%(e))
print(ha)
#b()
| 34.976684 | 390 | 0.59403 |
01623b6ac7c423b3e033ba536d9e56a325ace94d
| 92 |
py
|
Python
|
2014/12/state-outbreak-preparedness/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2014/12/state-outbreak-preparedness/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2014/12/state-outbreak-preparedness/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1exAtJogmunF-R70TvaWDUeSI_l3hBNGAQqIlfiz7uP8'
| 23 | 68 | 0.836957 |
6dbe7269619cd4767cc2fe11a43620e6cdd14447
| 392 |
py
|
Python
|
SoSe-21/Code-Vorlesungen/VL-7/Gruppe-Montag/VL-Histogramm.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
SoSe-21/Code-Vorlesungen/VL-7/Gruppe-Montag/VL-Histogramm.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
SoSe-21/Code-Vorlesungen/VL-7/Gruppe-Montag/VL-Histogramm.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# import matplotlib.pyplot as plt
# The set of input values (a string of digits)
zahlen = "1203456708948673516874354531568764645"
# Initialize the histogram variable (one counter per digit 0-9)
histogramm = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for index in range(len(zahlen)):
histogramm[int(zahlen[index])] += 1
# plt.hist(histogramm, bins = 9)
# plt.show()
for i in range(0, 10):
    print("The digit", i, "appears", histogramm[i], "times.")
| 24.5 | 60 | 0.67602 |
6dc3d3e7a80eac5dc41e59f1b8431ccbb2626936
| 751 |
py
|
Python
|
jobs/tasks/pay/index.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | 2 |
2019-06-10T08:57:47.000Z
|
2021-06-12T16:22:15.000Z
|
jobs/tasks/pay/index.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
jobs/tasks/pay/index.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from common.models.pay.PayOrder import PayOrder
from common.libs.Helper import get_format_date
from common.libs.pay.PayService import PayService
import datetime
from application import app, db
'''
python manager.py runjob -m pay/index
'''
class JobTask():
def __init__(self):
pass
def run(self,params):
now = datetime.datetime.now()
date_before_30min = now + datetime.timedelta( minutes = -30 )
        orders = PayOrder.query.filter_by(status=-8).\
            filter(PayOrder.created_time <= get_format_date(date=date_before_30min)).all()
        if not orders:
            app.logger.info("no data~~")
            return
        pay_target = PayService()
        for item in orders:
            pay_target.close_order(pay_order_id=item.id)
app.logger.info("it's over~~")
| 28.884615 | 83 | 0.724368 |
6de8bc77d99bd8c2b60621a8c6ad7cdc2d5243b7
| 303 |
py
|
Python
|
asteroid/utils/test_utils.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | 3 |
2020-09-30T02:32:08.000Z
|
2021-02-05T04:48:01.000Z
|
asteroid/utils/test_utils.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | 1 |
2020-09-17T08:55:12.000Z
|
2020-09-17T08:55:12.000Z
|
asteroid/utils/test_utils.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | 1 |
2021-02-05T04:48:05.000Z
|
2021-02-05T04:48:05.000Z
|
import torch
from torch.utils import data
class DummyDataset(data.Dataset):
def __init__(self):
self.inp_dim = 10
self.out_dim = 10
def __len__(self):
return 20
def __getitem__(self, idx):
return torch.randn(1, self.inp_dim), torch.randn(1, self.out_dim)
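# A minimal usage sketch with a standard DataLoader:
# loader = data.DataLoader(DummyDataset(), batch_size=4)
# inp, out = next(iter(loader))  # inp.shape == (4, 1, 10)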
| 20.2 | 73 | 0.650165 |
feea298fda9086d758aa991fce9281751e0298f3
| 2,753 |
py
|
Python
|
DQN/CartPole/DQN_brain.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | 2 |
2021-01-06T09:45:23.000Z
|
2021-04-21T09:39:14.000Z
|
DQN/CartPole/DQN_brain.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | null | null | null |
DQN/CartPole/DQN_brain.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
@File : DQN_brain.py
@Time : 2020/12/4
@Author : Yuan Yifu
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
MEMORY_CAPACITY = 2000
N_STATE = 4
N_ACTIONS = 2
EPSILON = 0.9
LR = 0.01
REPLACE_COUNTER = 100
BATCH_SIZE = 32
GAMMA = 0.9
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(N_STATE, 50)
self.fc1.weight.data.normal_(0, 0.1)
self.out = nn.Linear(50, N_ACTIONS)
self.out.weight.data.normal_(0, 0.1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.out(x)
return x
class DQN(object):
def __init__(self):
self.memory = np.zeros((MEMORY_CAPACITY, N_STATE*2+2))
self.memory_counter = 0
self.learn_step_counter = 0
# init Net
self.eval_net, self.target_net = Net(), Net()
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
self.loss_function = nn.MSELoss()
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, [a, r], s_))
index = self.memory_counter % MEMORY_CAPACITY
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
        # periodically copy the eval net's weights into the target net
if self.learn_step_counter % REPLACE_COUNTER == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
batch_memory = self.memory[sample_index, :]
b_s = torch.FloatTensor(batch_memory[:, :N_STATE])
b_a = torch.LongTensor(batch_memory[:, N_STATE:N_STATE+1].astype(int))
b_r = torch.FloatTensor(batch_memory[:, N_STATE+1:N_STATE+2])
b_s_ = torch.FloatTensor(batch_memory[:, -N_STATE:])
# q_eval
q_eval = self.eval_net(b_s).gather(1, b_a) # (batch, 1)
# q_target
q_next = self.target_net(b_s_).detach()
q_target = b_r + GAMMA * torch.max(q_next, 1)[0].view(BATCH_SIZE, 1)
loss = self.loss_function(q_eval, q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def choose_action(self, s):
        # wrap the state into a batch of one: shape (N_STATE,) -> (1, N_STATE)
s = torch.FloatTensor(s)
s = torch.unsqueeze(s, 0)
# epsilon greedy
if np.random.uniform() < EPSILON:
# greedy
actions_value = self.eval_net(s)
action = torch.max(actions_value, 1)[1].numpy()
action = action[0]
else:
# random
action = np.random.randint(0, N_ACTIONS)
return action
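# A minimal training-loop sketch (a sketch only, assuming Gym's classic
# CartPole-v0 API, which matches N_STATE = 4 and N_ACTIONS = 2):
# import gym
# env = gym.make('CartPole-v0')
# dqn = DQN()
# s = env.reset()
# while True:
#     a = dqn.choose_action(s)
#     s_, r, done, info = env.step(a)
#     dqn.store_transition(s, a, r, s_)
#     if dqn.memory_counter > MEMORY_CAPACITY:
#         dqn.learn()
#     s = env.reset() if done else s_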
| 28.978947 | 78 | 0.596803 |
3ab24c20823920d5111b8036369161a00d3ac54a
| 1,384 |
py
|
Python
|
nnc/migrations/0002_auto_20210729_1145.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
nnc/migrations/0002_auto_20210729_1145.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
nnc/migrations/0002_auto_20210729_1145.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2 on 2021-07-29 06:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nnc', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('weight', models.FloatField()),
('brand', models.CharField(max_length=50)),
('price', models.FloatField()),
('product_image', models.ImageField(upload_to='')),
],
),
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(null=True),
),
migrations.CreateModel(
name='CategoryProduct',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='nnc.category')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='nnc.product')),
],
),
]
| 35.487179 | 123 | 0.568642 |
42e032ee6f5fd3a434b14ad21b08ade337d214d7
| 9,115 |
py
|
Python
|
pacman-arch/test/pacman/pmdb.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/pmdb.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/pmdb.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
# Copyright (c) 2006 by Aurelien Foret <[email protected]>
# Copyright (c) 2006-2021 Pacman Development Team <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
import os
import shutil
import tarfile
import pmpkg
import tap
import util
def _getsection(fd):
i = []
while 1:
line = fd.readline().strip("\n")
if not line:
break
i.append(line)
return i
def make_section(data, title, values):
if not values:
return
data.append("%%%s%%" % title)
if isinstance(values, (list, tuple)):
data.extend(str(item) for item in values)
else:
# just a single value
data.append(str(values))
data.append('\n')
class pmdb(object):
"""Database object
"""
def __init__(self, treename, root):
self.treename = treename
self.root = root
self.pkgs = []
self.option = {}
self.syncdir = True
if self.treename == "local":
self.dbdir = os.path.join(root, util.PM_DBPATH, treename)
self.dbfile = None
self.is_local = True
self.read_dircache = None
self.read_pkgcache = {}
else:
self.dbdir = None
self.dbfile = os.path.join(root, util.PM_SYNCDBPATH, treename + ".db")
self.is_local = False
def __str__(self):
return "%s" % self.treename
def getverify(self):
for value in ("Required", "Never", "Optional"):
if value in self.treename:
return value
return "Never"
def getpkg(self, name):
for pkg in self.pkgs:
if name == pkg.name:
return pkg
def db_read(self, name):
if not self.dbdir or not os.path.isdir(self.dbdir):
return None
dbentry = None
if self.read_dircache is None:
self.read_dircache = os.listdir(self.dbdir)
for entry in self.read_dircache:
if entry == "ALPM_DB_VERSION":
continue
[pkgname, pkgver, pkgrel] = entry.rsplit("-", 2)
if pkgname == name:
dbentry = entry
break
if dbentry is None:
return None
if pkgname in self.read_pkgcache:
return self.read_pkgcache[pkgname]
pkg = pmpkg.pmpkg(pkgname, pkgver + "-" + pkgrel)
self.read_pkgcache[pkgname] = pkg
path = os.path.join(self.dbdir, dbentry)
# desc
filename = os.path.join(path, "desc")
if not os.path.isfile(filename):
tap.bail("invalid db entry found (desc missing) for pkg " + pkgname)
return None
fd = open(filename, "r")
while 1:
line = fd.readline()
if not line:
break
line = line.strip("\n")
if line == "%DESC%":
pkg.desc = fd.readline().strip("\n")
elif line == "%GROUPS%":
pkg.groups = _getsection(fd)
elif line == "%URL%":
pkg.url = fd.readline().strip("\n")
elif line == "%LICENSE%":
pkg.license = _getsection(fd)
elif line == "%ARCH%":
pkg.arch = fd.readline().strip("\n")
elif line == "%BUILDDATE%":
pkg.builddate = fd.readline().strip("\n")
elif line == "%INSTALLDATE%":
pkg.installdate = fd.readline().strip("\n")
elif line == "%PACKAGER%":
pkg.packager = fd.readline().strip("\n")
elif line == "%REASON%":
try:
pkg.reason = int(fd.readline().strip("\n"))
except ValueError:
pkg.reason = -1
raise
elif line == "%SIZE%" or line == "%CSIZE%":
try:
pkg.size = int(fd.readline().strip("\n"))
except ValueError:
pkg.size = -1
raise
elif line == "%MD5SUM%":
pkg.md5sum = fd.readline().strip("\n")
elif line == "%PGPSIG%":
pkg.pgpsig = fd.readline().strip("\n")
elif line == "%REPLACES%":
pkg.replaces = _getsection(fd)
elif line == "%DEPENDS%":
pkg.depends = _getsection(fd)
elif line == "%OPTDEPENDS%":
pkg.optdepends = _getsection(fd)
elif line == "%CONFLICTS%":
pkg.conflicts = _getsection(fd)
elif line == "%PROVIDES%":
pkg.provides = _getsection(fd)
fd.close()
# files
filename = os.path.join(path, "files")
if not os.path.isfile(filename):
tap.bail("invalid db entry found (files missing) for pkg " + pkgname)
return None
fd = open(filename, "r")
while 1:
line = fd.readline()
if not line:
break
line = line.strip("\n")
if line == "%FILES%":
while line:
line = fd.readline().strip("\n")
if line:
pkg.files.append(line)
if line == "%BACKUP%":
pkg.backup = _getsection(fd)
fd.close()
# install
filename = os.path.join(path, "install")
return pkg
#
# db_write is used to add both 'local' and 'sync' db entries
#
def db_write(self, pkg):
entry = {}
# desc/depends type entries
data = []
make_section(data, "NAME", pkg.name)
make_section(data, "VERSION", pkg.version)
make_section(data, "DESC", pkg.desc)
make_section(data, "GROUPS", pkg.groups)
make_section(data, "LICENSE", pkg.license)
make_section(data, "ARCH", pkg.arch)
make_section(data, "BUILDDATE", pkg.builddate)
make_section(data, "PACKAGER", pkg.packager)
make_section(data, "DEPENDS", pkg.depends)
make_section(data, "OPTDEPENDS", pkg.optdepends)
make_section(data, "CONFLICTS", pkg.conflicts)
make_section(data, "PROVIDES", pkg.provides)
make_section(data, "URL", pkg.url)
if self.is_local:
make_section(data, "INSTALLDATE", pkg.installdate)
make_section(data, "SIZE", pkg.size)
make_section(data, "REASON", pkg.reason)
else:
make_section(data, "FILENAME", pkg.filename())
make_section(data, "REPLACES", pkg.replaces)
make_section(data, "CSIZE", pkg.csize)
make_section(data, "ISIZE", pkg.isize)
make_section(data, "MD5SUM", pkg.md5sum)
make_section(data, "PGPSIG", pkg.pgpsig)
entry["desc"] = "\n".join(data)
# files and install
if self.is_local:
data = []
make_section(data, "FILES", pkg.filelist())
make_section(data, "BACKUP", pkg.local_backup_entries())
entry["files"] = "\n".join(data)
if any(pkg.install.values()):
entry["install"] = pkg.installfile()
return entry
def generate(self):
pkg_entries = [(pkg, self.db_write(pkg)) for pkg in self.pkgs]
if self.dbdir:
for pkg, entry in pkg_entries:
path = os.path.join(self.dbdir, pkg.fullname())
util.mkdir(path)
for name, data in entry.items():
util.mkfile(path, name, data)
if self.dbfile:
tar = tarfile.open(self.dbfile, "w:gz")
for pkg, entry in pkg_entries:
# TODO: the addition of the directory is currently a
# requirement for successful reading of a DB by libalpm
info = tarfile.TarInfo(pkg.fullname())
info.type = tarfile.DIRTYPE
tar.addfile(info)
for name, data in entry.items():
filename = os.path.join(pkg.fullname(), name)
info = tarfile.TarInfo(filename)
info.size = len(data)
tar.addfile(info, BytesIO(data.encode('utf8')))
tar.close()
# TODO: this is a bit unnecessary considering only one test uses it
serverpath = os.path.join(self.root, util.SYNCREPO, self.treename)
util.mkdir(serverpath)
shutil.copy(self.dbfile, serverpath)
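# A minimal usage sketch (the root path is an assumption; mirrors how the
# pactest harness drives this class):
# db = pmdb("sync", "/tmp/testroot")
# db.pkgs.append(pmpkg.pmpkg("foo", "1.0-1"))
# db.generate()  # writes the gzipped sync DB and copies it to the server dir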
| 34.923372 | 82 | 0.531761 |
b0cfc3d3bf9955fb661e1ec0bd1d6dc5443cb1b0
| 1,073 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v6_2/fix_missing_default_taxes_and_lead.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v6_2/fix_missing_default_taxes_and_lead.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v6_2/fix_missing_default_taxes_and_lead.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
# remove missing lead
for customer in frappe.db.sql_list("""select name from `tabCustomer`
where ifnull(lead_name, '')!='' and not exists (select name from `tabLead` where name=`tabCustomer`.lead_name)"""):
frappe.db.set_value("Customer", customer, "lead_name", None)
# remove missing default taxes
for customer in frappe.db.sql_list("""select name from `tabCustomer`
where ifnull(default_taxes_and_charges, '')!='' and not exists (
select name from `tabSales Taxes and Charges Template` where name=`tabCustomer`.default_taxes_and_charges
)"""):
c = frappe.get_doc("Customer", customer)
c.default_taxes_and_charges = None
c.save()
for supplier in frappe.db.sql_list("""select name from `tabSupplier`
where ifnull(default_taxes_and_charges, '')!='' and not exists (
select name from `tabPurchase Taxes and Charges Template` where name=`tabSupplier`.default_taxes_and_charges
)"""):
c = frappe.get_doc("Supplier", supplier)
c.default_taxes_and_charges = None
c.save()
| 41.269231 | 117 | 0.744641 |
c6e2d5025d80a181409d731ccf587ea75bb94b51
| 725 |
py
|
Python
|
___Python/Carsten/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Carsten/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Carsten/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
def mittelwert(liste):
return sum(liste)/len(liste)
def mittelwert2(*liste):
return sum(liste)/len(liste)
print(mittelwert([3, 4, 5]))
print(mittelwert2(3, 4, 5))
def ausgabe(Liste, ende="\n"):
for element in Liste:
print(element, end=ende)
def ausgabe2(Liste, **kwargs):
ende = "\n" if "ende" not in kwargs else kwargs["ende"]
erstezeile = " " if "erstezeile" not in kwargs else kwargs["erstezeile"]
print(erstezeile)
for element in Liste:
print(element, end=ende)
einkaufsliste = ["Milch", "Eier", "Bier"]
ausgabe(einkaufsliste)
ausgabe(einkaufsliste, " ")
print()
ausgabe2(einkaufsliste)
print()
ausgabe2(einkaufsliste, erstezeile="asdf")
| 24.166667 | 77 | 0.645517 |
b1b4780e8e74008de4143424c97cc6503058ec3b
| 2,011 |
py
|
Python
|
code/paamestia.py
|
zauberwild/paamestia
|
88ba643d6d9dc14d8abc686d464f723ea840e4f6
|
[
"MIT"
] | 1 |
2020-12-24T12:20:26.000Z
|
2020-12-24T12:20:26.000Z
|
code/paamestia.py
|
zauberwild/paamestia
|
88ba643d6d9dc14d8abc686d464f723ea840e4f6
|
[
"MIT"
] | null | null | null |
code/paamestia.py
|
zauberwild/paamestia
|
88ba643d6d9dc14d8abc686d464f723ea840e4f6
|
[
"MIT"
] | null | null | null |
"""
paamestia
main script
"""
# pygame stuff
import pygame
import pygame.freetype
pygame.init()
pygame.freetype.init()
# imports
import globals as gl
import ext_ui_methods_lib as ui
""" ### ### main loop ### ### """
while gl.prog_active:
ui.loop()
# run external methods according to prog_pos
if gl.prog_pos == 'i': # intro
ui.intro()
elif gl.prog_pos == 'm': # main menu
ui.main_menu()
elif gl.prog_pos[0] == 'f': # free mixing
if gl.prog_pos[1] == 'c': # mix cocktail
ui.free_choose()
elif gl.prog_pos[1] == 'o': # output
ui.free_output()
elif gl.prog_pos[0] == 'r': # recipe
if gl.prog_pos[1] == 'c': # choose recipe / cocktail
ui.recipe_choose()
elif gl.prog_pos[1] == 'o': # output
ui.recipe_output()
elif gl.prog_pos[0] == 's': # settings
if gl.prog_pos[1] == 'c': # choose between import or setting drinks
ui.settings_choose()
elif gl.prog_pos[1] == 'd': # setting the drinks
ui.settings_drink()
elif gl.prog_pos[1] == 'i': # import recipes
ui.settings_import()
elif gl.prog_pos == 'cr': # credits
ui.credits()
elif gl.prog_pos[0] == 'q': # quit / shutdown
ui.shutdown()
else:
text = ["ERROR", "invalid prog_pos: " + str(gl.prog_pos), "resetting to 'm'"]
gl.screen.fill((0,0,0))
h = 0
for t in text:
print(t)
gl.debug_font.render_to(gl.screen, (0,h), t, (255,0,0))
h += 32
		gl.prog_pos = 'm'
ui.end_loop()
# debug infos
if gl.show_debug:
fps = str(gl.clock.get_fps()) # get fps
debug_main_loop = ["FPS: " + fps[0:6], "prog_pos: " + gl.prog_pos]
h = 3
for t in debug_main_loop + gl.debug_text:
textsur, rect = gl.debug_font.render(t, (0, 255, 0))
pygame.draw.rect(gl.screen, (0,0,0), (0,gl.H - rect.height - h,rect.width,rect.height))
gl.screen.blit(textsur, (0,gl.H - rect.height - h))
h += rect.height + 3
gl.debug_text.clear()
pygame.display.flip() # refresh window and show content
gl.clock.tick(gl.FPS) # limit fps
pygame.quit()
| 23.658824 | 90 | 0.61462 |
5520a59059023af687e2cd895064ca5eb926e4f5
| 579 |
py
|
Python
|
Chapter5_DNN/Chapter5_5_NeuralNetworkGUI/start/drawer/src/preprocessing.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter5_DNN/Chapter5_5_NeuralNetworkGUI/start/drawer/src/preprocessing.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter5_DNN/Chapter5_5_NeuralNetworkGUI/start/drawer/src/preprocessing.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
import os
from typing import Any
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import center_of_mass
FILE_PATH = os.path.abspath(__file__)
PROJECT_DIR = os.path.dirname(os.path.dirname(FILE_PATH))
def load(image_path: str) -> np.ndarray:
pass # TODO
def resize(image: np.ndarray) -> np.ndarray:
pass # TODO
def normalize(image: np.ndarray) -> np.ndarray:
pass # TODO
def center(image: np.ndarray) -> np.ndarray:
pass # TODO
def get_image(DrawingFrame: Any, debug: bool = False) -> np.ndarray:
pass # TODO
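# Possible shapes for the TODOs above (a sketch only, not the course solution):
# load:      cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
# resize:    cv2.resize(image, (28, 28))
# normalize: image.astype(np.float32) / 255.0
# center:    shift the image so center_of_mass(image) lands on the image center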
| 17.545455 | 68 | 0.704663 |
9b596c057f676eb8d344832bc535c05925d901d7
| 1,000 |
py
|
Python
|
rvk/keys/rvk_keys.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 7 |
2015-10-02T15:43:56.000Z
|
2021-12-07T11:20:46.000Z
|
rvk/keys/rvk_keys.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 22 |
2015-11-06T14:33:55.000Z
|
2022-03-31T11:48:11.000Z
|
rvk/keys/rvk_keys.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 4 |
2018-10-17T13:06:57.000Z
|
2022-03-14T08:46:35.000Z
|
#!/usr/bin/env python3
import csv
import sys
inputfile = sys.argv[1]
csvwriter = csv.writer(sys.stdout, delimiter=',')
csvwriter.writerow(("notation", "prefLabel", "broaderNotation"))
notation_by_id = {}
with open(inputfile, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
id = row[0]
notation = row[1]
parentid = row[2]
key = row[6]
if parentid == "-1":
notation = key
else:
notation = key + " " + notation
        notation_by_id[id] = notation
with open('rvk_schluessel_2019.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
id = row[0]
parentid = row[2]
label = row[3]
addinf = row[4]
ref = row[5]
        notation = notation_by_id[id]
        if parentid == "-1":
            row = (notation, label, "")
        else:
            row = (notation, label, notation_by_id[parentid])
csvwriter.writerow(row)
| 25 | 64 | 0.542 |
85ddfdee024df99ee33c491242c5dfd17b5299fe
| 1,735 |
py
|
Python
|
842/Fcat_842_backtracking.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
842/Fcat_842_backtracking.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
842/Fcat_842_backtracking.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def splitIntoFibonacci(self, num: str) -> List[int]:
def check_next_fib(fibs, index):
while str(fibs[-1] + fibs[-2]) == num[index:index + len(str(fibs[-1] + fibs[-2]))]:
if fibs[-1] + fibs[-2] >= 2147483648:
return False
fibs.append(fibs[-1] + fibs[-2])
index += len(str(fibs[-1]))
return index == len(num)
if num[0] == '0':
second_len = 1
while second_len <= len(num)//2:
if 1+second_len == len(num):
return []
result = [0, int(num[1:1+second_len])]
if check_next_fib(result, 1+second_len):
return result
second_len += 1
else:
first_len = 1
while first_len <= len(num)//2:
second_len = 1
if num[first_len] == '0':
result = [int(num[0:first_len]), 0]
if check_next_fib(result, first_len+1):
return result
else:
first_len += 1
continue
while second_len <= len(num)//2:
if first_len+second_len == len(num):
return []
result = [int(num[0:first_len]), int(num[first_len:first_len+second_len])]
if check_next_fib(result, first_len+second_len):
return result
second_len += 1
first_len += 1
return []
print(Solution().splitIntoFibonacci("1320581321313221264343965566089105744171833277577"))
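# Another check, using the LeetCode sample input (expected output: [123, 456, 579]):
print(Solution().splitIntoFibonacci("123456579"))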
| 36.145833 | 95 | 0.459366 |
c82a47e72d6874b62a23d6e009c4375e5b84c92f
| 75 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/fast.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/fast.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/fast.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
l = input("enter a number")
h = input("enter another number")
a = (l * h)
print "area", a
| 18.75 | 28 | 0.626667 |
f1f4099023e6b6d59f30abee8dc1afadb79b3acf
| 146 |
py
|
Python
|
3.Python/shelb-doc.py
|
AyushSarode/Hacktoberfest2020-Newbie
|
f7194dabcefb7e808eaedc852c630cbd65367517
|
[
"MIT"
] | 11 |
2020-10-02T06:37:32.000Z
|
2020-10-28T09:09:33.000Z
|
3.Python/shelb-doc.py
|
AyushSarode/Hacktoberfest2020-Newbie
|
f7194dabcefb7e808eaedc852c630cbd65367517
|
[
"MIT"
] | 7 |
2020-10-08T09:03:22.000Z
|
2020-11-03T23:58:13.000Z
|
3.Python/shelb-doc.py
|
AyushSarode/Hacktoberfest2020-Newbie
|
f7194dabcefb7e808eaedc852c630cbd65367517
|
[
"MIT"
] | 58 |
2020-09-29T15:35:39.000Z
|
2021-10-05T17:35:15.000Z
|
name, age = "Shelby De Oliveira", 29
username = "shelb-doc"
print ('Hello!')
print("Name: {}\nAge: {}\nUsername: {}".format(name, age, username))
| 29.2 | 68 | 0.643836 |
cde2403b73ac9d49dcb8079c7134bc254f3991a7
| 7,346 |
py
|
Python
|
packages/watchmen-rest-doll/src/watchmen_rest_doll/console/subject_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-doll/src/watchmen_rest_doll/console/subject_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-doll/src/watchmen_rest_doll/console/subject_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import Callable, List, Optional
from fastapi import APIRouter, Depends
from starlette.responses import Response
from watchmen_auth import PrincipalService
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.console import ConnectedSpaceService, ReportService, SubjectService
from watchmen_model.admin import UserRole
from watchmen_model.common import SubjectId
from watchmen_model.console import Report, Subject
from watchmen_rest import get_console_principal, get_super_admin_principal
from watchmen_rest.util import raise_400, raise_403, raise_404
from watchmen_rest_doll.doll import ask_tuple_delete_enabled
from watchmen_rest_doll.util import trans, trans_readonly
from watchmen_utilities import get_current_time_in_seconds, is_blank
router = APIRouter()
def get_subject_service(principal_service: PrincipalService) -> SubjectService:
return SubjectService(ask_meta_storage(), ask_snowflake_generator(), principal_service)
def get_connected_space_service(subject_service: SubjectService) -> ConnectedSpaceService:
return ConnectedSpaceService(
subject_service.storage, subject_service.snowflakeGenerator, subject_service.principalService)
def get_report_service(subject_service: SubjectService) -> ReportService:
return ReportService(
subject_service.storage, subject_service.snowflakeGenerator, subject_service.principalService)
def ask_save_subject_action(
subject_service: SubjectService, principal_service: PrincipalService) -> Callable[[Subject], Subject]:
def action(subject: Subject) -> Subject:
subject.userId = principal_service.get_user_id()
subject.tenantId = principal_service.get_tenant_id()
subject.lastVisitTime = get_current_time_in_seconds()
if subject_service.is_storable_id_faked(subject.subjectId):
connect_id = subject.connectId
if is_blank(connect_id):
raise_400('Connected space id is required.')
connected_space_service: ConnectedSpaceService = get_connected_space_service(subject_service)
existing_connected_space: Optional[Subject] = connected_space_service.find_by_id(connect_id)
if existing_connected_space is None:
raise_400('Incorrect connected space id.')
elif existing_connected_space.tenantId != subject.tenantId or existing_connected_space.userId != subject.userId:
raise_403()
subject_service.redress_storable_id(subject)
# noinspection PyTypeChecker
subject: Subject = subject_service.create(subject)
else:
# noinspection PyTypeChecker
existing_subject: Optional[Subject] = subject_service.find_by_id(subject.subjectId)
if existing_subject is not None:
if existing_subject.tenantId != subject.tenantId:
raise_403()
if existing_subject.userId != subject.userId:
raise_403()
subject.connectId = existing_subject.connectId
# noinspection PyTypeChecker
subject: Subject = subject_service.update(subject)
return subject
return action
@router.get('/subject/name', tags=[UserRole.CONSOLE, UserRole.ADMIN], response_model=Subject)
async def load_subject_by_name(name: str,
principal_service: PrincipalService = Depends(get_console_principal)) -> Subject:
subject_service = get_subject_service(principal_service)
def action() -> Subject:
return subject_service.find_by_name(name)
return trans_readonly(subject_service, action)
@router.post('/subject', tags=[UserRole.CONSOLE, UserRole.ADMIN], response_model=Subject)
async def save_subject(
subject: Subject, principal_service: PrincipalService = Depends(get_console_principal)
) -> Subject:
subject_service = get_subject_service(principal_service)
action = ask_save_subject_action(subject_service, principal_service)
return trans(subject_service, lambda: action(subject))
@router.get('/subject/rename', tags=[UserRole.CONSOLE, UserRole.ADMIN], response_class=Response)
async def update_subject_name_by_id(
subject_id: Optional[SubjectId], name: Optional[str],
principal_service: PrincipalService = Depends(get_console_principal)
) -> None:
"""
	Renaming a subject does not increase the optimistic lock version.
"""
if is_blank(subject_id):
raise_400('Subject id is required.')
subject_service = get_subject_service(principal_service)
# noinspection DuplicatedCode
def action() -> None:
existing_one = subject_service.find_tenant_and_user(subject_id)
if existing_one is None:
raise_404()
existing_tenant_id, existing_user_id = existing_one
if existing_tenant_id != principal_service.get_tenant_id():
raise_403()
elif existing_user_id != principal_service.get_user_id():
raise_403()
# noinspection PyTypeChecker
subject_service.update_name(
subject_id, name, principal_service.get_user_id(), principal_service.get_tenant_id())
trans(subject_service, action)
@router.get('/subject/delete', tags=[UserRole.CONSOLE, UserRole.ADMIN], response_class=Response)
async def delete_subject_by_id(
subject_id: Optional[SubjectId], principal_service: PrincipalService = Depends(get_console_principal)
) -> None:
if is_blank(subject_id):
raise_400('Subject id is required.')
subject_service = get_subject_service(principal_service)
# noinspection DuplicatedCode
def action() -> None:
# noinspection PyTypeChecker
existing_subject: Optional[Subject] = subject_service.find_by_id(subject_id)
if existing_subject is None:
raise_404()
if existing_subject.tenantId != principal_service.get_tenant_id():
raise_403()
if not principal_service.is_tenant_admin() and existing_subject.userId != principal_service.get_user_id():
raise_403()
subject_service.delete(subject_id)
report_service: ReportService = get_report_service(subject_service)
report_service.delete_by_subject_id(subject_id)
trans(subject_service, action)
class SubjectWithReports(Subject):
reports: List[Report] = []
@router.delete('/subject', tags=[UserRole.SUPER_ADMIN], response_model=SubjectWithReports)
async def delete_subject_by_id_by_super_admin(
subject_id: Optional[SubjectId] = None,
principal_service: PrincipalService = Depends(get_super_admin_principal)
) -> SubjectWithReports:
if not ask_tuple_delete_enabled():
raise_404('Not Found')
if is_blank(subject_id):
raise_400('Subject id is required.')
subject_service = get_subject_service(principal_service)
def action() -> SubjectWithReports:
# noinspection PyTypeChecker
subject: Subject = subject_service.delete(subject_id)
if subject is None:
raise_404()
report_service: ReportService = get_report_service(subject_service)
reports: List[Report] = report_service.delete_by_subject_id(subject_id)
return SubjectWithReports(**subject.dict(), reports=reports)
return trans(subject_service, action)
| 41.502825 | 124 | 0.737408 |
a93c59cdb6ee1b78cc1c59f343e289f1ca60fed7
| 210 |
py
|
Python
|
src/python/py-accepted/236A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | 2 |
2019-09-07T17:00:26.000Z
|
2020-08-05T02:08:35.000Z
|
src/python/py-accepted/236A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/python/py-accepted/236A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | null | null | null |
#! python
# Problem # : 236A
# Created on : 2019-01-14 23:41:28
def Main():
cnt = len(set(input()))
print('IGNORE HIM!' if cnt & 1 else 'CHAT WITH HER!')
if __name__ == '__main__':
Main()
| 15 | 57 | 0.561905 |
9e058e1d235494a1c0cea3fb6861559ed48cbf3a
| 959 |
py
|
Python
|
Packs/CommonScripts/Scripts/FileCreateAndUploadV2/FileCreateAndUploadV2.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 1 |
2021-11-02T05:36:38.000Z
|
2021-11-02T05:36:38.000Z
|
Packs/CommonScripts/Scripts/FileCreateAndUploadV2/FileCreateAndUploadV2.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 61 |
2021-10-07T08:54:38.000Z
|
2022-03-31T10:25:35.000Z
|
Packs/CommonScripts/Scripts/FileCreateAndUploadV2/FileCreateAndUploadV2.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | null | null | null |
import base64
import traceback
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
args = demisto.args()
filename = args.get('filename', '')
data = args.get('data', '')
data_encoding = args.get('data_encoding', 'raw')
    entry_id = args.get('entryId')
try:
        if entry_id:
            res = demisto.executeCommand('getEntry', {'id': entry_id})
if is_error(res):
raise DemistoException(get_error(res))
data = demisto.get(res[0], 'Contents')
if data_encoding == 'base64':
data = base64.b64decode(data)
elif data_encoding != 'raw':
raise ValueError(f'Invalid data encoding name: {data_encoding}')
return_results(fileResult(filename, data))
except Exception as e:
return_error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
| 28.205882 | 76 | 0.607925 |
f57fecf4e45ef37fa99cd8f2c73cf05ae15afdd7
| 13,706 |
py
|
Python
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import io
import pytest
from CommonServerPython import DemistoException, FeedIndicatorType, CommandResults
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
SOCRADAR_API_ENDPOINT = 'https://platform.socradar.com/api'
CALCULATE_DBOT_SCORE_INPUTS = [
(900, 3),
(800, 2),
(450, 2),
(300, 1),
(100, 1),
(0, 0),
]
def test_test_module(requests_mock):
"""Tests the test_module validation command.
"""
from SOCRadarThreatFusion import Client, test_module
mock_socradar_api_key = "APIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
response = test_module(client)
assert response == 'ok'
def test_test_module_handles_authorization_error(requests_mock):
"""Tests the test_module validation command authorization error.
"""
from SOCRadarThreatFusion import Client, test_module, MESSAGES
mock_socradar_api_key = "WrongAPIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response_auth_error.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response, status_code=401)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(DemistoException, match=MESSAGES['AUTHORIZATION_ERROR']):
test_module(client)
def test_ip_command(requests_mock):
"""Tests the ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_ip_command_handles_incorrect_entity_type():
"""Tests the ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
ip_command(
client=client,
args=mock_args,
)
def test_domain_command(requests_mock):
"""Tests the domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_domain_command_handles_incorrect_entity_type():
"""Tests the domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
domain_command(
client=client,
args=mock_args,
)
def test_file_command(requests_mock):
"""Tests the file_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'file': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = file_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_file_command_handles_incorrect_entity_type():
"""Tests the file_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_args = {'file': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
file_command(
client=client,
args=mock_args,
)
def test_score_ip(requests_mock):
"""Tests the score_ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_ip_handles_incorrect_entity_type():
"""Tests the score_ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_ip_command(
client=client,
args=mock_args,
)
def test_score_domain(requests_mock):
"""Tests the score_domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_domain_handles_incorrect_entity_type():
"""Tests the score_domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_domain_command(
client=client,
args=mock_args,
)
def test_score_hash(requests_mock):
"""Tests the score_hash_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'hash': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_hash_command(
client=client,
args=mock_args
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_hash_handles_incorrect_entity_type():
"""Tests the score_hash_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_args = {'hash': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_hash_command(
client=client,
args=mock_args,
)
@pytest.mark.parametrize('socradar_score, dbot_score', CALCULATE_DBOT_SCORE_INPUTS)
def test_calculate_dbot_score(socradar_score, dbot_score):
from SOCRadarThreatFusion import calculate_dbot_score
assert calculate_dbot_score(socradar_score) == dbot_score
def test_map_indicator_type():
from SOCRadarThreatFusion import map_indicator_type
assert FeedIndicatorType.IP == map_indicator_type('ipv4')
assert FeedIndicatorType.IPv6 == map_indicator_type('ipv6')
assert FeedIndicatorType.Domain == map_indicator_type('hostname')
assert FeedIndicatorType.File == map_indicator_type('hash')
assert None is map_indicator_type('IP')
assert None is map_indicator_type('invalid')
| 31.292237 | 121 | 0.721509 |
27163f24d844c92ccb536a87d1157a8f38afad46
| 74 |
py
|
Python
|
Python/M01_ProgrammingBasics/L04_ForLoop/Lab/Solutions/P03_EvenPowersOf2.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L04_ForLoop/Lab/Solutions/P03_EvenPowersOf2.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L04_ForLoop/Lab/Solutions/P03_EvenPowersOf2.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
n = int(input())
for number in range(0, n + 1, 2):
print(2 ** number)
| 18.5 | 33 | 0.554054 |
fdc3f82bfe8cd9cd0dfc5da504d65b190d0f8e4a
| 491 |
py
|
Python
|
profil/migrations/0010_auto_20191219_1938.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | null | null | null |
profil/migrations/0010_auto_20191219_1938.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | 14 |
2018-09-12T06:59:55.000Z
|
2020-02-26T07:17:48.000Z
|
profil/migrations/0010_auto_20191219_1938.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-12-19 18:38
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profil', '0009_auto_20190112_2032'),
]
operations = [
migrations.AlterField(
model_name='kioskuser',
name='aktivBis',
field=models.DateField(default=datetime.date(1999, 12, 31)),
),
]
| 22.318182 | 72 | 0.631365 |
e3014b4ac863fdcc0113d57c274d4b8a383198e1
| 649 |
py
|
Python
|
command/generate_queries.py
|
Janrupf/airport-db-seeding
|
768a9373f02ede5bf613d09270d2fbe84de37a97
|
[
"MIT"
] | null | null | null |
command/generate_queries.py
|
Janrupf/airport-db-seeding
|
768a9373f02ede5bf613d09270d2fbe84de37a97
|
[
"MIT"
] | null | null | null |
command/generate_queries.py
|
Janrupf/airport-db-seeding
|
768a9373f02ede5bf613d09270d2fbe84de37a97
|
[
"MIT"
] | null | null | null |
from pathlib import Path
def run(data):
scripts_path = Path("scripts/queries")
scripts_path.mkdir(parents=True, exist_ok=True)
target_path = Path("scripts/queries.sql")
contents = list()
for child in scripts_path.iterdir():
if child.is_file():
with open(child) as f:
content = f.readlines()
content.insert(0, f"-- table: {child.name}\n")
print(content)
contents.append(content)
with open(target_path, "w") as f:
for file in contents:
for line in file:
                f.write(line)
f.write("\n\n\n")
| 28.217391 | 62 | 0.550077 |
4733ace140f79fe7cd170dae520a781ee4570091
| 1,012 |
py
|
Python
|
x2paddle/project_convertor/pytorch/torch2paddle/__init__.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 559 |
2019-01-14T06:01:55.000Z
|
2022-03-31T02:52:43.000Z
|
x2paddle/project_convertor/pytorch/torch2paddle/__init__.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 353 |
2019-05-07T13:20:03.000Z
|
2022-03-31T05:30:12.000Z
|
x2paddle/project_convertor/pytorch/torch2paddle/__init__.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 241 |
2018-12-25T02:13:51.000Z
|
2022-03-27T23:21:43.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .io import *
from .layer import *
from .tensor import *
from .optimizer import *
from .nn import *
from .nn_utils import *
from .nn_functional import *
from .nn_init import *
from .varbase import *
from .vision_transforms import *
from .device import *
from .vision_utils import *
from .vision_datasets import *
from .ops import *
from .learning_rate_scheduler import *
from .parambase import *
| 32.645161 | 74 | 0.76087 |
47d799e33ad3c0b215a7bc41a0380faf90812fe8
| 2,271 |
py
|
Python
|
test/test_npu/test_network_ops/test_conv_tbc.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_conv_tbc.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_conv_tbc.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
import torch.nn as nn
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestConvTbc(TestCase):
def op_exec_cpu(self, input1, weight, bias, pad):
cpu_output = torch.conv_tbc(input1, weight, bias, pad)
cpu_output = cpu_output.numpy().astype('float16')
return cpu_output
def op_exec_npu(self, input1, weight, bias, pad):
input1 = input1.to("npu")
weight = weight.to("npu")
bias = bias.to("npu")
npu_output = torch.conv_tbc(input1, weight, bias, pad)
npu_output = npu_output.to("cpu")
npu_output = npu_output.numpy().astype('float16')
return npu_output
def test_conv_tbc_shape_format(self, device):
inputs = np.random.uniform(0, 2, [5, 1, 2])
npu_input = torch.from_numpy(inputs.astype('float16'))
cpu_input = torch.from_numpy(inputs)
weights = np.random.uniform(0, 2, [1, 2, 2])
npu_weight = torch.from_numpy(weights.astype('float16'))
cpu_weight = torch.from_numpy(weights)
bias = np.random.uniform(0, 2, [2])
npu_bias = torch.from_numpy(bias.astype('float16'))
cpu_bias = torch.from_numpy(bias)
pad = 1
cpu_output = self.op_exec_cpu(cpu_input, cpu_weight, cpu_bias, pad)
npu_output = self.op_exec_npu(npu_input, npu_weight, npu_bias, pad)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestConvTbc, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
| 38.491525 | 75 | 0.704095 |
d056390316c4c3b08e77129ff3eda9ac506e66fc
| 6,486 |
py
|
Python
|
School/G11/Pizza.py
|
MastaCoder/Projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 5 |
2018-10-11T01:55:40.000Z
|
2021-12-25T23:38:22.000Z
|
School/G11/Pizza.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | null | null | null |
School/G11/Pizza.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 1 |
2019-02-22T14:42:50.000Z
|
2019-02-22T14:42:50.000Z
|
running = True
# Global Options
SIZES = {'small': [7.58, 0], 'medium': [9.69, 1], 'large': [12.09, 2], 'jumbo': [17.99, 3], 'party': [20.29, 4]} # [Price, ID]
TOPPINGS = {'pepperoni': [0, False], 'bacon': [0, True], 'sausage': [0, False], 'mushroom': [1, False], 'black olive': [1, False], 'green pepper': [1, False]} # [Group, Special]
TOPPING_PRICES = [[1.6, 2.05, 2.35, 3.15, 3.30], [1.95, 2.25, 2.65, 3.49, 3.69]] # 0: No Special - 1: Special [Inc size]
def ask(question, options, show = True):
answer = False
o = "" # Choices to show, only if show = true
if show:
o = " (" + ', '.join([str(lst).title() for lst in options]) + ")"
while True:
a = input(question + o + ": ").lower() # User input
if a.isdigit(): # Allow users to pick via number too
a = int(a) - 1 # Set to an int
if a in range(0, len(options)):
a = options[a] # Set to value so next function completes
if a in options: # Is option valid?
answer = a
break
print("Not a valid option, try again!") # Nope
return answer # Return answer
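# Example (illustrative): ask("Type of order", ['delivery', 'pickup']) accepts
# "delivery", "Pickup", or the option's number ("1"/"2"), and loops until valid.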
def splitter():
print("---------------------------------------------------")
# Only do while running, used for confirmation.
while running:
PIZZA = {} # Pizza config
DELIVERY = {} # Delivery config
TOTAL = 0
# Start title
print(" MAKAN PIZZA ")
splitter()
# Delivery or pickup?
DELIVERY['type'] = ask("Type of order", ['delivery', 'pickup'])
while True:
DELIVERY['location'] = input("Location of " + DELIVERY['type'] + ": ") # Need a location
# Did they leave it blank?
if DELIVERY['location']:
break
else:
print("Please enter a valid location!")
# If delivery, ask for special instructions
if DELIVERY['type'] == "delivery":
DELIVERY['special_instructions'] = input("Special instructions (Blank for None): ")
# Do they have special instructions?
if not DELIVERY['special_instructions']:
DELIVERY['special_instructions'] = "None"
splitter()
# Size of the pizza
PIZZA['size'] = ask("Select the size", ['small', 'medium', 'large', 'jumbo', 'party'])
# Dough type
PIZZA['dough'] = ask("Select dough type", ['regular', 'whole wheat', 'carbone'])
# Type of sauce
PIZZA['sauce'] = ask("Type of sauce", ['tomato', 'pesto', 'bbq sauce', 'no sauce'])
# Type of primary cheese
PIZZA['cheese'] = ask("Type of cheese", ['mozzarella', 'cheddar', 'dairy-free', 'no cheese'])
splitter()
# Pick your topping section!
print("Pick your Toppings!", end = "")
count = -1 # Category count
for i in TOPPINGS:
start = "" # Used for the comma
# Check category and print to new line if so.
if TOPPINGS[i][0] != count:
count = TOPPINGS[i][0]
print("\n--> ", end = "")
else:
start = ", " # Split toppings
print(start + i.title(), end = "") # Print topping
# Special topping?
if TOPPINGS[i][1]:
print(" (*)", end = "")
print() # New line
# Extra functions
print("--> Typing in list will show current toppings.")
print("--> Retyping in a topping will remove it.")
print("--> Press enter once you're done!")
# Topping selector
PIZZA['toppings'] = []
while True:
top = input("Pick your topping: ") # Get input
if top == "list": # Want a list of toppings.
if not len(PIZZA['toppings']): # Do they have toppings?
print("You have no toppings!")
else:
for i in PIZZA['toppings']: # Go through and print toppings.
print("--> ", i.title())
elif not top: # Done picking.
break
else: # Picked a topping?
if top.endswith('s'): # If it ends with s, remove and check (sausages -> sausage)
top = top[:-1]
if top in TOPPINGS:
if top in PIZZA['toppings']:
print("Topping", top.title(), "has been removed from your order.")
PIZZA['toppings'].remove(top) # Remove topping
else:
print("Topping", top.title(), "has been added to your order.")
PIZZA['toppings'].append(top) # Add topping
else:
print("That topping does not exist!")
splitter()
print(" MAKAN PIZZA ORDER CONFIRMATION ")
splitter()
# Calculate the price of order and print.
print(PIZZA['size'].title() + " Pizza (CAD$" + str(SIZES[PIZZA['size']][0]) + ")")
TOTAL += SIZES[PIZZA['size']][0] # Price of size
# Free Things
print("--> " + PIZZA['dough'].title() + " (FREE)")
print("--> " + PIZZA['sauce'].title() + " (FREE)")
print("--> " + PIZZA['cheese'].title() + " (FREE)")
# Toppings
if PIZZA['toppings']:
print("--> Toppings:") # If they have any toppings, print title
# Go through all the toppings
for i in PIZZA['toppings']:
if TOPPINGS[i][1]:
tpp = TOPPING_PRICES[1][SIZES[PIZZA['size']][1]] # Special pricing
else:
tpp = TOPPING_PRICES[0][SIZES[PIZZA['size']][1]] # Non-Special pricing
print(" --> " + i.title() + " (CAD$" + format(tpp, '.2f') + ")") # Print the topping name and price
TOTAL += tpp # Add price of topping to total
splitter()
print("Sub-Total: CAD$" + format(TOTAL, '.2f')) # total
# Delivery has delivery fee (Fixed $3.50)
if DELIVERY['type'] == "delivery":
print("Delivery Fee: CAD$3.50")
TOTAL += 3.5
# Calculate and add tax
TAX = round(TOTAL * 0.13, 2)
TOTAL += TAX
print("Tax: CAD$" + format(TAX, '.2f'))
print("Total: CAD$" + format(TOTAL, '.2f'))
splitter()
CONFIRM = ask("Do you wish to confirm this order", ['yes', 'no'])
splitter()
# Done?
if CONFIRM == "yes":
break
# Final order print
print("Your order is on the way, we'll be there in 45 minutes or you get a refund!")
if DELIVERY['type'] == "delivery": # Did they get delivery?
print("Delivery to:", DELIVERY['location'], "(Special Instructions:", DELIVERY['special_instructions'] + ")")
else:
print("Pickup at:", DELIVERY['location'])
splitter()
| 37.062857 | 177 | 0.533611 |
de3b726dcfd836a816338d387f0a4c0586d9837a
| 8,238 |
py
|
Python
|
Transformer/Transformer_.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
Transformer/Transformer_.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
Transformer/Transformer_.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
"""
Transformer implementation
Author: Bao Wenjie
Date: 2021/3/7
REF: http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
from torch.autograd import Variable
class EncoderDecoder(nn.Module):
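    """A standard encoder-decoder architecture: encode the source sequence,
    then decode conditioned on the encoder memory."""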
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
class Generator(nn.Module):
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, X):
return F.log_softmax(self.proj(X), dim=-1)
def clones(module, N):
""" 将一个模型拷贝多次叠加 """
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
""" 编码器 """
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, X, mask):
for layer in self.layers:
X = layer(X, mask)
return self.norm(X)
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, X):
mean = X.mean(-1, keepdim=True)
std = X.std(-1, keepdim=True)
return self.a_2 * (X - mean) / (std + self.eps) + self.b_2
class SubLayerConnection(nn.Module):
""" 残差连接 """
def __init__(self, size, dropout):
"""
Param
-----
:size
:dropout
"""
super(SubLayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, X, sublayer):
return X + self.dropout(sublayer(self.norm(X)))
class EncoderLayer(nn.Module):
""" 编码器的一层 """
def __init__(self, size, self_attn, feed_forward, dropout):
"""
Param
-----
:size
:self_attn
:feed_forward
:dropout
"""
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SubLayerConnection(size, dropout), 2)
self.size = size
def forward(self, X, mask):
X = self.sublayer[0](X, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](X, self.feed_forward)
class Decoder(nn.Module):
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, X, memory, src_mask, tgt_mask):
for layer in self.layers:
X = layer(X, memory, src_mask, tgt_mask)
return self.norm(X)
class DecoderLayer(nn.Module):
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SubLayerConnection(size, dropout), 3)
def forward(self, X, memory, src_mask, tgt_mask):
m = memory
X = self.sublayer[0](X, lambda x:self.self_attn(x, x, x, tgt_mask))
X = self.sublayer[1](X, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](X, self.feed_forward)
def subsequent_mask(size):
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k = 1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
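# Illustration (not from the original source): subsequent_mask(3) yields
#   tensor([[[ True, False, False],
#            [ True,  True, False],
#            [ True,  True,  True]]])
# i.e. position i may attend only to positions <= i.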
def attention(query, key, value, mask=None, dropout=None):
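    # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V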
d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)  # effectively -inf, so softmax zeroes masked positions
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadAttention, self).__init__()
assert d_model % h == 0
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
nbatches = query.size(0)
query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h*self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, X):
return self.w_2(self.dropout(F.relu(self.w_1(X))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, X):
        return self.lut(X) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout, max_len=5000):
"""
        Positional encoding
        Param
        -----
        :d_model  model dimension (the dimension of the produced encoding)
        :dropout
        :max_len  maximum sentence length
"""
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
# (max_len, 1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
# (d_model/2)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
# (1, max_len, d_model)
self.register_buffer('pe', pe)
def forward(self, X):
X = X + Variable(self.pe[:, :X.size(1)], requires_grad=False)
return self.dropout(X)
import matplotlib.pyplot as plt

# Demo from the reference tutorial: visualize a few positional-encoding dimensions.
# Guarded so that importing this module does not open a plot window.
if __name__ == "__main__":
    plt.figure(figsize=(15, 5))
    pe = PositionalEncoding(20, 0)
    y = pe.forward(Variable(torch.zeros(1, 100, 20)))
    plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
    plt.show()
def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
"""
    Build the full model
    Param
    -----
    :src_vocab  source vocabulary size
    :tgt_vocab  target vocabulary size
    :N          number of encoder/decoder layers
    :d_model    model dimension
    :d_ff       feed-forward hidden dimension
    :h          number of attention heads
    :dropout
"""
c = copy.deepcopy
attn = MultiHeadAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab)
)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
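
# Usage sketch (hyper-parameters here are illustrative, not from the original):
#   tmp_model = make_model(src_vocab=11, tgt_vocab=11, N=2)
#   src = torch.randint(1, 11, (2, 10))
#   out = tmp_model(src, src, src_mask=None, tgt_mask=subsequent_mask(10))
#   log_probs = tmp_model.generator(out)   # shape (2, 10, 11)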
| 31.204545 | 138 | 0.615319 |
7251a9cddcac3ca7253315617b552e7fb5136eae
| 471 |
py
|
Python
|
detect_anomalies.py
|
JoelHaubold/NzmLabeling
|
363c027ce6584899025f79ea4758e1808b2aa96a
|
[
"MIT"
] | null | null | null |
detect_anomalies.py
|
JoelHaubold/NzmLabeling
|
363c027ce6584899025f79ea4758e1808b2aa96a
|
[
"MIT"
] | null | null | null |
detect_anomalies.py
|
JoelHaubold/NzmLabeling
|
363c027ce6584899025f79ea4758e1808b2aa96a
|
[
"MIT"
] | null | null | null |
import logarithmoforecast
import pandas as pd
from pathlib import Path
def create_ml_dataframe(station_name, phase, pickle_dir=Path('pickles')):
path = pickle_dir / station_name
df_ml = pd.read_pickle(path / ("h_phase"+str(phase)))
    df_ml = df_ml.drop(columns=['ServiceDeliveryPoint'])  # drop() returns a new DataFrame
print(df_ml)
def main():
station_name = 'NW000000000000000000000NBSNST0888'
test_dir = Path('testPickles')
create_ml_dataframe(station_name, 0, test_dir)
if __name__ == '__main__':
    main()
| 24.789474 | 73 | 0.740977 |
a0ccc9c86f74e1c212fb9075845bda4af158fdd4
| 3,641 |
py
|
Python
|
official/cv/mobilenetv2_quant/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2021-07-08T13:10:42.000Z
|
2021-11-08T02:48:57.000Z
|
official/cv/mobilenetv2_quant/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
official/cv/mobilenetv2_quant/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Export MobilenetV2 on ImageNet"""
import argparse
import numpy as np
import mindspore
from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export
from mindspore.compression.common import QuantDtype
from mindspore.compression.quant import QuantizationAwareTraining
from mindspore.compression.quant.quantizer import OptimizeOption
from src.mobilenetV2 import mobilenetV2
from src.mobilenetv2_mix_quant import mobilenetv2_mix_quant
from src.config import config_quant
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="MINDIR", help="file format")
parser.add_argument('--device_target', type=str, default=None, help='Run device target')
parser.add_argument('--optim_option', type=str, default="QAT", help='OptimizeOption')
args_opt = parser.parse_args()
if __name__ == '__main__':
cfg = config_quant(args_opt.device_target)
context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target, save_graphs=False)
if args_opt.optim_option == "LEARNED_SCALE":
# define fusion network
network = mobilenetv2_mix_quant(num_classes=cfg.num_classes)
# convert fusion network to quantization aware network
        quant_optim_options = OptimizeOption.LEARNED_SCALE
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, True],
narrow_range=[True, True],
quant_dtype=(QuantDtype.INT4, QuantDtype.INT8),
freeze_bn=0,
quant_delay=0,
one_conv_fold=True,
                                              optimize_option=quant_optim_options)
else:
# define fusion network
network = mobilenetV2(num_classes=cfg.num_classes)
# convert fusion network to quantization aware network
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, False])
network = quantizer.quantize(network)
# load checkpoint
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(network, param_dict)
# export network
print("============== Starting export ==============")
inputs = Tensor(np.ones([1, 3, cfg.image_height, cfg.image_width]), mindspore.float32)
export(network, inputs, file_name="mobilenetv2_quant", file_format=args_opt.file_format,
quant_mode='QUANT', mean=0., std_dev=48.106)
print("============== End export ==============")
| 49.876712 | 111 | 0.642131 |
9d606bc58b436b44d159a2d4a2bfa6fd8143c76e
| 871 |
py
|
Python
|
savageml/utility/loss_functions.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
savageml/utility/loss_functions.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
savageml/utility/loss_functions.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
import numpy as np
def mean_squared_error(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
if axis is None:
return np.mean(np.square(np.subtract(observed_value, predicted_value)))
else:
return np.mean(np.square(np.subtract(observed_value, predicted_value)), axis=axis, keepdims=True)
def mean_squared_error_derivative(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
if axis is None:
return np.multiply(np.mean(np.subtract(observed_value, predicted_value)), 2.0)
else:
return np.multiply(np.mean(np.subtract(observed_value, predicted_value), axis=axis, keepdims=True), 2.0)
class LossFunctions:
MSE = mean_squared_error
class LossFunctionDerivatives:
MSE_DERIVATIVE = mean_squared_error_derivative
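

# Minimal usage example (values are illustrative):
if __name__ == "__main__":
    observed = np.array([[1.0, 2.0], [3.0, 4.0]])
    predicted = np.array([[0.5, 1.5], [2.5, 3.5]])
    print(LossFunctions.MSE(observed, predicted))                       # 0.25
    print(LossFunctionDerivatives.MSE_DERIVATIVE(observed, predicted))  # 1.0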
| 33.5 | 125 | 0.746269 |
5f1babfeaa215707d37f84a6fbfd72f88443e789
| 382 |
py
|
Python
|
IVTa/2014/SAVCHENKOV_A_M/task_1_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/SAVCHENKOV_A_M/task_1_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/SAVCHENKOV_A_M/task_1_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 1. Variant 28
# Write a program that reports the occupation and the pseudonym under which
# Norma Baker hides. After printing the information, the program must wait
# until the user presses Enter to exit.
print("Norma Baker, better known as Marilyn Monroe, was an American \nfilm actress, singer and sex symbol.")
input("\nPress Enter to exit...")
| 54.571429 | 108 | 0.795812 |
26786c4a1069b5de398f71e5114d509f1670b3b0
| 143 |
py
|
Python
|
hardware/chip/rtl872xd/hal/hal_test/ucube.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 4,538 |
2017-10-20T05:19:03.000Z
|
2022-03-30T02:29:30.000Z
|
hardware/chip/rtl872xd/hal/hal_test/ucube.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 1,088 |
2017-10-21T07:57:22.000Z
|
2022-03-31T08:15:49.000Z
|
hardware/chip/rtl872xd/hal/hal_test/ucube.py
|
willianchanlovegithub/AliOS-Things
|
637c0802cab667b872d3b97a121e18c66f256eab
|
[
"Apache-2.0"
] | 1,860 |
2017-10-20T05:22:35.000Z
|
2022-03-27T10:54:14.000Z
|
src = Split('''
hal_test.c
''')
component = aos_component('hal_test', src)
component.add_cflags('-Wall')
component.add_cflags('-Werror')
| 15.888889 | 42 | 0.685315 |
26e3e51359c7f6d973f7e33c603f8cd613694d58
| 2,545 |
py
|
Python
|
DataStructure/U12/U12_79.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | 1 |
2019-10-29T08:21:41.000Z
|
2019-10-29T08:21:41.000Z
|
DataStructure/U12/U12_79.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | null | null | null |
DataStructure/U12/U12_79.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
#string = sys.argv[1]
def initialize():
string = "1 2 3, 1 -2, 2 -3"
clauses = string.split(',')
print("Boolean Formel: ", clauses)
length = len(string)
global ls, substitution, value
value = []
substitution = [0, 1]
ls = [None] * length
status = False
counter = 0
for k in clauses:
for temp in k:
if(temp == ' '):
pass
elif(temp == '-'):
status = True
else:
if(status):
ls[counter] = -1 * int(temp)
else:
ls[counter] = int(temp)
counter += 1
status = False
ls[counter] = None
counter += 1
findElements()
# x is an integer
def substitute(x, value):
global ls
if value > 0:
status = True
else:
status = False
temp = []
for e in ls:
if(e == x):
temp.append(e)
# if 1 and x without negation, remove the whole clause
if(status):
removeAllNeighbors(ls.index(e))
else:
ls[ls.index(e)] = None
elif(e == -1 * x):
temp.append(e)
# if 1 and x with negation, remove x
if(status):
ls[ls.index(e)] = None
else:
removeAllNeighbors(ls.index(e))
else:
pass
def removeAllNeighbors(index):
global ls
while(index <= len(ls) - 1 and ls[index] != None):
ls[index] = None
index += 1
while(index >= 0 and ls[index] != None):
ls[index] = None
index -= 1
def isSatisfied():
global ls
temp = None
counter = 0
for x in ls:
if(x == None):
pass
else:
temp = x
counter += 1
if(counter == 1 or counter == 0):
return True
else:
return False
def findElements():
global ls, value
for x in ls:
if((x != None) and (x not in value) and ((-1 * x) not in value)):
value.append(x)
def process():
global value
for x in range(1, max(value) + 1):
for i in substitution:
substitute(x, i)
if(isSatisfied()):
return True
return False
def main():
global ls, value
initialize()
if(process()):
print("Bingo")
else:
print('Fuck')
print(ls)
if __name__ == '__main__':
main()
| 20.198413 | 73 | 0.463261 |
f8220b2d2e4a2f9c3b32b095852979df43be7131
| 1,008 |
py
|
Python
|
test/test_levenshtein.py
|
katsugeneration/schuss
|
30e2303aa0646f0a4424cbc4b5422acc58a06218
|
[
"Apache-2.0"
] | 6 |
2018-12-20T06:50:52.000Z
|
2021-12-16T13:08:38.000Z
|
test/test_levenshtein.py
|
katsugeneration/schuss
|
30e2303aa0646f0a4424cbc4b5422acc58a06218
|
[
"Apache-2.0"
] | 1 |
2020-06-29T07:00:51.000Z
|
2020-06-29T07:00:51.000Z
|
test/test_levenshtein.py
|
katsugeneration/schuss
|
30e2303aa0646f0a4424cbc4b5422acc58a06218
|
[
"Apache-2.0"
] | 1 |
2022-03-04T07:29:55.000Z
|
2022-03-04T07:29:55.000Z
|
from parameterized import parameterized
from nose.tools import assert_equal, assert_true
import os
import sys
dir_path = os.path.dirname(os.path.abspath(__file__))
module_path = os.path.join(dir_path, "../src")
if module_path not in sys.path:
sys.path.append(module_path)
os.chdir(dir_path)
class TestLevenshtein:
def test_init(self):
from distance.levenshtein import Levenshtein
Levenshtein()
def test_measure(self):
from distance.levenshtein import Levenshtein
l = Levenshtein()
assert_equal(0, l.measure("あいう", "あいう"))
assert_equal(1, l.measure("あいう", "あい"))
assert_equal(1, l.measure("いう", "あいう"))
assert_equal(1, l.measure("あいう", "あえう"))
assert_equal(2, l.measure("あいう", "あ"))
assert_equal(2, l.measure("あいう", "あいうえお"))
assert_equal(2, l.measure("あいう", "あえいうお"))
assert_equal(2, l.measure("あ", "愛"))
assert_equal(2, l.measure("愛", "「"))
assert_equal(1, l.measure("あ", "「"))
| 31.5 | 53 | 0.641865 |
f89bb5eabd945384434347dd1427bbd72171300c
| 4,293 |
py
|
Python
|
gdp1/assignment_4/hanoi.py
|
einekatze/gdp1-homework
|
28889dfa1863e1f1dee164e5b465dddb1cfacfcc
|
[
"MIT"
] | null | null | null |
gdp1/assignment_4/hanoi.py
|
einekatze/gdp1-homework
|
28889dfa1863e1f1dee164e5b465dddb1cfacfcc
|
[
"MIT"
] | null | null | null |
gdp1/assignment_4/hanoi.py
|
einekatze/gdp1-homework
|
28889dfa1863e1f1dee164e5b465dddb1cfacfcc
|
[
"MIT"
] | null | null | null |
import curses
import sys
from os import path
class TowerOfHanoi:
def __init__(self, stdscr):
self.disks = 0
self.pegs = []
# For the curses-based UI.
self.stdscr = stdscr
self.x_base = 5
self.y_base = 2
def init_curses_colors(self):
curses.init_color(1, 894, 102, 110)
curses.init_color(2, 216, 494, 722)
curses.init_color(3, 302, 686, 290)
curses.init_color(4, 596, 306, 639)
curses.init_color(5, 1000, 498, 0)
curses.init_color(6, 1000, 1000, 200)
curses.init_color(7, 651, 337, 157)
curses.init_pair(1, 1, 0)
curses.init_pair(2, 2, 0)
curses.init_pair(3, 3, 0)
curses.init_pair(4, 4, 0)
curses.init_pair(5, 5, 0)
curses.init_pair(6, 6, 0)
curses.init_pair(7, 7, 0)
curses.init_pair(8, 8, 0)
def peg_color(self, disk):
return curses.color_pair(1 + disk % 8)
def display_peg(self, letter, peg, y, x):
s = self.stdscr
# Draw the peg itself.
for i in range(self.disks + 1):
s.addstr(y + i, x + self.disks, "|")
# Draw the disks on the peg.
for i in range(len(peg)):
y_offset = self.disks + y - i
x_offset = x + 1 + (self.disks - peg[i])
disk = "#" * (1 + 2 * (peg[i] - 1))
color = self.peg_color(peg[i])
s.addstr(y_offset, x_offset, disk, color)
# Draw the plate.
s.addstr(y + 1 + self.disks, x, "-" * (1 + self.disks * 2))
# Draw the letter beneath the peg.
s.addstr(y + 3 + self.disks, x + self.disks, letter)
def display_tower(self):
self.display_peg("A", self.pegs[0], self.y_base, self.x_base)
self.display_peg("B", self.pegs[1], self.y_base, self.x_base + self.disks * 2 + 3)
self.display_peg("C", self.pegs[2], self.y_base, self.x_base + 2 * (self.disks * 2 + 3))
def show_state_and_next_move(self, peg_a, peg_c):
self.stdscr.clear()
self.display_tower()
self.stdscr.addstr(self.y_base + self.disks + 5, self.x_base,
"Moving disk %d (= n) from peg %s to peg %s." % (peg_a[1][-1], peg_a[0], peg_c[0]))
self.stdscr.addstr(self.y_base + self.disks + 7, self.x_base,
"Press any key to show the next step...")
self.stdscr.getch()
def solve_tower(self, disks):
# Set up the display colors and hide the cursor.
self.init_curses_colors()
curses.curs_set(False)
# Generate the pegs. Disks are represented by numbers, where the largest number represents the biggest disk.
# The top of the stack is the end of the peg list.
self.disks = disks
self.pegs = []
self.pegs.append(list(range(disks, 0, -1)))
self.pegs.append([])
self.pegs.append([])
# Start solving the tower.
self.solve_step(disks, ("A", self.pegs[0]), ("B", self.pegs[1]), ("C", self.pegs[2]))
# Display the end result, wait for input, and restore the cursor.
self.stdscr.clear()
self.display_tower()
self.stdscr.addstr(self.y_base + self.disks + 5, self.x_base, "Done!")
self.stdscr.addstr(self.y_base + self.disks + 7, self.x_base, "Press Space to exit.")
while self.stdscr.getch() != ord(" "):
pass
curses.curs_set(True)
def solve_step(self, n, start, buffer, target):
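        # Recursive scheme: move the top n-1 disks to the buffer peg, move the
        # largest disk to the target, then move the n-1 disks back on top of it.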
if n > 0:
self.solve_step(n - 1, start, target, buffer)
self.show_state_and_next_move(start, target)
target[1].append(start[1].pop())
self.solve_step(n - 1, buffer, start, target)
def run(stdscr, disks):
tower = TowerOfHanoi(stdscr)
tower.solve_tower(disks)
def show_usage_and_exit():
basepath = path.basename(path.realpath(__file__))
print("Usage: %s number-of-disks" % basepath)
sys.exit(1)
def main():
disks = 0
if len(sys.argv) != 2:
show_usage_and_exit()
try:
disks = int(sys.argv[1])
except ValueError:
show_usage_and_exit()
if disks < 1:
show_usage_and_exit()
curses.wrapper(run, disks)
if __name__ == "__main__":
main()
| 28.62 | 116 | 0.566969 |
e4a060c55419c13e5662d1b033d518ce0494f5fc
| 890 |
py
|
Python
|
agents/nets/cnn2.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | 2 |
2021-01-07T01:10:49.000Z
|
2022-01-21T09:37:16.000Z
|
agents/nets/cnn2.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
agents/nets/cnn2.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input
class CNN2Net(Model):
def __init__(self, output_dim=3):
"""
Args:
            output_dim: the network produces three outputs, one cumulative return per action
"""
super(CNN2Net, self).__init__()
        self.c1 = Conv2D(filters=16, kernel_size=(4, 4), strides=2, activation='relu')  # convolutional layer
        self.c2 = Conv2D(filters=32, kernel_size=(2, 2), strides=1, activation='relu')  # convolutional layer
self.flatten = Flatten()
self.d1 = Dense(64, activation='relu')
self.d3 = Dense(output_dim)
def call(self, x):
x = self.c1(x)
x = self.c2(x)
x = self.flatten(x)
x = self.d1(x)
# x = self.d2(x)
y = self.d3(x)
return y
def model(self):
x = Input(shape=(6, 6, 1))
return Model(inputs=[x], outputs=self.call(x))
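
# Usage sketch (illustrative; assumes the 6x6 single-channel input above):
#   import tensorflow as tf
#   net = CNN2Net(output_dim=3)
#   q_values = net(tf.random.normal((1, 6, 6, 1)))  # -> shape (1, 3)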
| 27.8125 | 93 | 0.561798 |
90044b4795cabd524ecd61fd4425e141f39d9606
| 83 |
py
|
Python
|
examples/nowcoder/SQL7/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5 |
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
examples/nowcoder/SQL7/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7 |
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
examples/nowcoder/SQL7/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1 |
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.apps import AppConfig
class Sql7Config(AppConfig):
name = 'SQL7'
| 13.833333 | 33 | 0.73494 |
f2e7936fbf1f9c345615a76cbbee0a4ffa951e89
| 986 |
py
|
Python
|
ref/dockerfiles-master/kinesis/consumer.py
|
DavidSche/davidche.tools
|
739578f2f92ad9cc120a0e08c30b9910efcd0033
|
[
"Apache-2.0"
] | 2 |
2021-04-25T06:18:26.000Z
|
2021-11-14T15:49:51.000Z
|
ref/dockerfiles-master/kinesis/consumer.py
|
DavidSche/davidche.tools
|
739578f2f92ad9cc120a0e08c30b9910efcd0033
|
[
"Apache-2.0"
] | null | null | null |
ref/dockerfiles-master/kinesis/consumer.py
|
DavidSche/davidche.tools
|
739578f2f92ad9cc120a0e08c30b9910efcd0033
|
[
"Apache-2.0"
] | 1 |
2022-03-23T00:36:06.000Z
|
2022-03-23T00:36:06.000Z
|
import boto3
import json
import time
import os
client = boto3.Session(region_name='eu-west-1').client('kinesis', aws_access_key_id='', aws_secret_access_key='', endpoint_url='http://localhost:4567')
stream_details = client.describe_stream(StreamName='mystream')
shard_id = stream_details['StreamDescription']['Shards'][0]['ShardId']
response = client.get_shard_iterator(StreamName='mystream', ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')
shard_iterator = response['ShardIterator']
print("Starting Consuming at {}".format(time.strftime("%H:%m:%S")))
while True:
response = client.get_records(ShardIterator=shard_iterator, Limit=5)
if len(response['Records']) == 0:
print("Finshed Consuming at {}".format(time.strftime("%H:%m:%S")))
break
shard_iterator = response['NextShardIterator']
for record in response['Records']:
if 'Data' in record and len(record['Data']) > 0:
print(json.loads(record['Data']))
time.sleep(0.75)
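# A matching producer sketch for local testing (assumptions: the same local Kinesis
# endpoint as above; run it as a separate script so the consumer loop has records to
# read, hence it is left commented here):
# producer = boto3.Session(region_name='eu-west-1').client(
#     'kinesis', aws_access_key_id='', aws_secret_access_key='',
#     endpoint_url='http://localhost:4567')
# producer.put_record(StreamName='mystream', Data=json.dumps({'hello': 'world'}),
#                     PartitionKey='demo')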
| 39.44 | 151 | 0.713996 |
8429c53f7fd28307a11ce122a67b429fe703dee1
| 10,777 |
py
|
Python
|
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | 1 |
2021-04-14T00:45:38.000Z
|
2021-04-14T00:45:38.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from context import resource_manager
import pandas
import numpy
from tools import logger
import numpy as np
import matplotlib.pyplot as plt
log = logger.getLogger()
def plot_image_file(img):
plt.imshow(img)
plt.show()
def plot_image(narray, w='', h=''):
log.info("plot image array:" + str(narray.shape))
    if w != '':
narray = narray.reshape(w, h)
plt.imshow(narray)
plt.show()
def plot_rho_delta(rho, delta):
'''
Plot scatter diagram for rho-delta points
Args:
rho : rho list
delta : delta list
'''
log.info("PLOT: rho-delta plot")
plot_scatter_diagram(0, rho[1:], delta[1:], x_label='rho', y_label='delta', title='rho-delta')
# def plot_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', style_list=None):
# '''
# Plot scatter diagram
#
# Args:
# which_fig : which sub plot
# x : x array
# y : y array
# x_label : label of x pixel
# y_label : label of y pixel
# title : title of the plot
# '''
# styles =
# assert len(x) == len(y)
# if style_list != None:
# assert len(x) == len(style_list) and len(styles) >= len(set(style_list))
# plt.figure(which_fig)
# plt.clf()
# if style_list == None:
# plt.plot(x, y, styles[0])
# else:
# clses = set(style_list)
# xs, ys = {}, {}
# for i in range(len(x)):
# try:
# xs[style_list[i]].append(x[i])
# ys[style_list[i]].append(y[i])
# except KeyError:
# xs[style_list[i]] = [x[i]]
# ys[style_list[i]] = [y[i]]
# added = 1
# for idx, cls in enumerate(clses):
# if cls == -1:
# style = styles[0]
# added = 0
# else:
# style = styles[idx + added]
# plt.plot(xs[cls], ys[cls], style)
# plt.title(title)
# plt.xlabel(x_label)
# plt.ylabel(y_label)
# plt.ylim(bottom=0)
# plt.show()
def plot_dataframe_scatter_diagram(which_fig, data, x_label='x', y_label='y', title='title', label=None):
styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
linestyles = ['-.', '--', 'None', '-', ':']
    stylesMarker = ['.',  # point
',', # pixel
'o', # circle
'v', # triangle down
'^', # triangle up
'<', # triangle_left
'>', # triangle_right
'1', # tri_down
'2', # tri_up
'3', # tri_left
'4', # tri_right
'8', # octagon
's', # square
'p', # pentagon
'*', # star
'h', # hexagon1
'H', # hexagon2
'+', # plus
'x', # x
'D', # diamond
'd', # thin_diamond
'|', # vline
]
    # styles = []
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "color.csv").iloc[:, 2]
    plt.figure(which_fig)
    plt.clf()
    # The original body configured the axes but never drew the data; plot the first
    # two DataFrame columns so the function actually produces a scatter diagram.
    plt.scatter(data.iloc[:, 0], data.iloc[:, 1], c=stylesColors[0].strip(), marker=stylesMarker[0], label=label)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.ylim(bottom=0)
    plt.legend(loc='upper left')
    plt.show()
def plot_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None):
'''
Plot scatter diagram
Args:
which_fig : which sub plot
x : x array
y : y array
x_label : label of x pixel
y_label : label of y pixel
title : title of the plot
'''
styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
linestyles = ['-.', '--', 'None', '-', ':']
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 2]
assert len(x) == len(y)
    if label is not None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
plt.figure(which_fig)
plt.clf()
    if label is None:
plt.plot(x, y, styles[0])
else:
l = len(label)
labelSet = set(label)
k = 0
for i in labelSet:
xs = []
ys = []
for j in range(l):
if i == label[j]:
xs.append(x[j])
ys.append(y[j])
k = k + 1
            try:
                plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=r"$ {} $".format(str(i)), label=i)
            except Exception:
                # Log the style tables for diagnosis (k running past the available
                # styles is the usual cause) and re-raise instead of retrying the
                # same failing call.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                raise
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.ylim(bottom=0)
# plt.legend(loc='upper left')
plt.show()
def save_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None,
path=resource_manager.Properties.getDefaultDataFold() + "result" + resource_manager.getSeparator() + "result.png"):
'''
Plot scatter diagram
Args:
which_fig : which sub plot
x : x array
y : y array
x_label : label of x pixel
y_label : label of y pixel
title : title of the plot
'''
styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
linestyles = ['-.', '--', 'None', '-', ':']
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 2]
assert len(x) == len(y)
    if label is not None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
plt.figure(which_fig)
plt.clf()
    if label is None:
plt.plot(x, y, styles[0])
else:
l = len(label)
labelSet = set(label)
k = 0
for i in labelSet:
xs = []
ys = []
for j in range(l):
if i == label[j]:
xs.append(x[j])
ys.append(y[j])
k = k + 1
            try:
                if k <= 7:
                    plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=stylesMarker[k], label=i)
                else:
                    plt.scatter(xs, ys, c=stylesColors[k % 100].strip(), marker=r"$ {} $".format(str(i)), label=i)
            except Exception:
                # Log the style tables for diagnosis and re-raise instead of retrying
                # the same failing call.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                raise
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.ylim(bottom=0)
plt.savefig(path,dpi=900)
#plt.savefig(path)
plt.close()
def save_all_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None,
path=resource_manager.Properties.getDefaultDataFold() + "result" + resource_manager.getSeparator() + "result.png"):
'''
Plot scatter diagram
Args:
which_fig : which sub plot
x : x array
y : y array
x_label : label of x pixel
y_label : label of y pixel
title : title of the plot
'''
styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
linestyles = ['-.', '--', 'None', '-', ':']
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").iloc[:, 2]
assert len(x) == len(y)
    if label is not None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
plt.figure(which_fig)
plt.clf()
    if label is None:
plt.plot(x, y, styles[0])
else:
l = len(label)
labelSet = set(label)
k = 0
for i in labelSet:
xs = []
ys = []
for j in range(l):
if i == label[j]:
xs.append(x[j])
ys.append(y[j])
k = k + 1
            try:
                # if k <= 7:
                #     plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=stylesMarker[k], label=i)
                # else:
                plt.scatter(xs, ys, c=stylesColors[k % 100].strip(), marker=r"$ {} $".format(str(i)), label=i)
            except Exception:
                # Log the style tables for diagnosis and re-raise instead of retrying
                # the same failing call.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                raise
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.ylim(bottom=0)
# plt.legend(loc='upper left')
plt.savefig(path+".jpg",dpi=900)
#plt.savefig(path+".jpg")
plt.close()
if __name__ == '__main__':
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([2, 3, 4, 5, 6, 2, 4, 8])
cls = np.array([1, 4, 2, 3, 5, 1, 1, 7])
plot_scatter_diagram(0, x, y, label=cls)
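    # save_scatter_diagram writes the same figure to disk instead of displaying it;
    # a hedged follow-up (assumption: "out.png" in the working directory is writable):
    save_scatter_diagram(1, x, y, label=cls, path="out.png")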
| 34.10443 | 141 | 0.461538 |
08138e27474e66b5810b4b252a33adee270963de
| 87 |
py
|
Python
|
Django/ballon/apps.py
|
ballon3/GRAD
|
c630e32272fe34ead590c04d8360169e02be87f1
|
[
"MIT"
] | null | null | null |
Django/ballon/apps.py
|
ballon3/GRAD
|
c630e32272fe34ead590c04d8360169e02be87f1
|
[
"MIT"
] | null | null | null |
Django/ballon/apps.py
|
ballon3/GRAD
|
c630e32272fe34ead590c04d8360169e02be87f1
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BallonConfig(AppConfig):
name = 'ballon'
| 14.5 | 33 | 0.747126 |
d9fc1f3c4ad13786efc9302efb8fa7ea490ce0da
| 5,504 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/demo/user/purchase.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/demo/user/purchase.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/demo/user/purchase.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, random
from frappe.utils.make_random import how_many, get_random
from frappe.desk import query_report
from erpnext.setup.utils import get_exchange_rate
from erpnext.accounts.party import get_party_account_currency
from erpnext.exceptions import InvalidCurrency
from erpnext.stock.doctype.material_request.material_request import make_request_for_quotation
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import \
make_supplier_quotation as make_quotation_from_rfq
def work():
frappe.set_user(frappe.db.get_global('demo_purchase_user'))
if random.random() < 0.3:
report = "Items To Be Requested"
for row in query_report.run(report)["result"][:random.randint(1, 5)]:
item_code, qty = row[0], abs(row[-1])
mr = make_material_request(item_code, qty)
if random.random() < 0.3:
for mr in frappe.get_all('Material Request',
filters={'material_request_type': 'Purchase', 'status': 'Open'},
limit=random.randint(1,6)):
if not frappe.get_all('Request for Quotation',
filters={'material_request': mr.name}, limit=1):
rfq = make_request_for_quotation(mr.name)
rfq.transaction_date = frappe.flags.current_date
add_suppliers(rfq)
rfq.save()
rfq.submit()
	# Make supplier quotation from RFQ against each supplier.
if random.random() < 0.3:
for rfq in frappe.get_all('Request for Quotation',
filters={'status': 'Open'}, limit=random.randint(1, 6)):
if not frappe.get_all('Supplier Quotation',
filters={'request_for_quotation': rfq.name}, limit=1):
rfq = frappe.get_doc('Request for Quotation', rfq.name)
for supplier in rfq.suppliers:
supplier_quotation = make_quotation_from_rfq(rfq.name, supplier.supplier)
supplier_quotation.save()
supplier_quotation.submit()
# get supplier details
supplier = get_random("Supplier")
company_currency = frappe.db.get_value("Company", "Wind Power LLC", "default_currency")
party_account_currency = get_party_account_currency("Supplier", supplier, "Wind Power LLC")
if company_currency == party_account_currency:
exchange_rate = 1
else:
exchange_rate = get_exchange_rate(party_account_currency, company_currency)
# make supplier quotations
if random.random() < 0.2:
from erpnext.stock.doctype.material_request.material_request import make_supplier_quotation
report = "Material Requests for which Supplier Quotations are not created"
for row in query_report.run(report)["result"][:random.randint(1, 3)]:
if row[0] != "'Total'":
sq = frappe.get_doc(make_supplier_quotation(row[0]))
sq.transaction_date = frappe.flags.current_date
sq.supplier = supplier
sq.currency = party_account_currency or company_currency
sq.conversion_rate = exchange_rate
sq.insert()
sq.submit()
frappe.db.commit()
# make purchase orders
if random.random() < 0.5:
from erpnext.stock.doctype.material_request.material_request import make_purchase_order
report = "Requested Items To Be Ordered"
for row in query_report.run(report)["result"][:how_many("Purchase Order")]:
if row[0] != "'Total'":
po = frappe.get_doc(make_purchase_order(row[0]))
po.supplier = supplier
po.currency = party_account_currency or company_currency
po.conversion_rate = exchange_rate
po.transaction_date = frappe.flags.current_date
po.insert()
po.submit()
frappe.db.commit()
if random.random() < 0.2:
make_subcontract()
def make_material_request(item_code, qty):
mr = frappe.new_doc("Material Request")
variant_of = frappe.db.get_value('Item', item_code, 'variant_of') or item_code
if frappe.db.get_value('BOM', {'item': variant_of, 'is_default': 1, 'is_active': 1}):
mr.material_request_type = 'Manufacture'
else:
mr.material_request_type = "Purchase"
mr.transaction_date = frappe.flags.current_date
mr.schedule_date = frappe.utils.add_days(mr.transaction_date, 7)
mr.append("items", {
"doctype": "Material Request Item",
"schedule_date": frappe.utils.add_days(mr.transaction_date, 7),
"item_code": item_code,
"qty": qty
})
mr.insert()
mr.submit()
return mr
def add_suppliers(rfq):
for i in range(2):
supplier = get_random("Supplier")
if supplier not in [d.supplier for d in rfq.get('suppliers')]:
rfq.append("suppliers", { "supplier": supplier })
def make_subcontract():
from erpnext.buying.doctype.purchase_order.purchase_order import make_stock_entry
item_code = get_random("Item", {"is_sub_contracted_item": 1})
if item_code:
# make sub-contract PO
po = frappe.new_doc("Purchase Order")
po.is_subcontracted = "Yes"
po.supplier = get_random("Supplier")
po.schedule_date = frappe.utils.add_days(frappe.flags.current_date, 7)
item_code = get_random("Item", {"is_sub_contracted_item": 1})
po.append("items", {
"item_code": item_code,
"schedule_date": frappe.utils.add_days(frappe.flags.current_date, 7),
"qty": random.randint(10, 30)
})
po.set_missing_values()
try:
po.insert()
except InvalidCurrency:
return
po.submit()
		# make material request for the subcontracted item
make_material_request(po.items[0].item_code, po.items[0].qty)
# transfer material for sub-contract
stock_entry = frappe.get_doc(make_stock_entry(po.name, po.items[0].item_code))
stock_entry.from_warehouse = "Stores - WPL"
stock_entry.to_warehouse = "Supplier - WPL"
stock_entry.insert()
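# A minimal invocation sketch (assumptions: an ERPNext demo site is already
# bootstrapped and frappe.flags.current_date is set by the demo harness, so this
# stays commented; 'demo-site' is a hypothetical site name):
# import frappe
# frappe.init(site='demo-site')
# frappe.connect()
# work()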
| 35.057325 | 94 | 0.742914 |
8a0a0862cfd8814dd290af9a040daeea80778378
| 1,682 |
py
|
Python
|
tests/integration/server/search_api_tests.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/server/search_api_tests.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | 1 |
2019-07-08T22:32:43.000Z
|
2019-07-08T22:32:43.000Z
|
tests/integration/server/search_api_tests.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Authentication API Endpoint Test"""
import requests
def create_test_user(session):
"""Create a user and authenticate to use api endpoints during testing."""
create_user_input = {
"name": "Susan Susanson",
"username": "susan20",
"password": "123456",
"email": "[email protected]",
}
session.post("http://rbac-server:8000/api/users", json=create_user_input)
def test_search_api():
"""Tests the search api endpoint functions and returns a valid payload."""
with requests.Session() as session:
create_test_user(session)
search_query = {
"query": {
"search_input": "search input",
"search_object_types": ["role", "pack", "user"],
"page_size": "20",
"page": "2",
}
}
response = session.post("http://rbac-server:8000/api/search", json=search_query)
assert response.json()["data"] == {"roles": [], "packs": [], "users": []}
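# A direct-run sketch (assumption: the rbac-server container referenced above is up
# and reachable from the host running this file):
if __name__ == "__main__":
    test_search_api()
    print("search API returned an empty, well-formed payload")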
| 38.227273 | 88 | 0.615339 |
6a8786049d93e5d64909f980dc2b63e8b8fc9186
| 18,405 |
py
|
Python
|
Packs/CloudConvert/Integrations/CloudConvert/CloudConvert.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CloudConvert/Integrations/CloudConvert/CloudConvert.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CloudConvert/Integrations/CloudConvert/CloudConvert.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
import urllib3
from typing import Any, Dict
# Disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
@logger
def __init__(self, headers, verify=False, proxy=False):
url = 'https://api.cloudconvert.com/v2'
super().__init__(url, headers=headers, verify=verify, proxy=proxy)
@logger
def upload_url(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Upload the file given as url to the API's server, for later conversion.
Note - this operation is called 'import' by the API.
Args:
arguments: dict containing the request arguments, should contain the field 'url'
Returns:
dict containing the results of the upload action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
return self._http_request(
method='POST',
url_suffix='import/url',
data=arguments,
ok_codes=(200, 201, 422),
)
@logger
def upload_entry_id(self, file_path: str, file_name: str) -> Dict[str, Any]:
"""
Upload the file given as a war room entry id to the API's server, for later conversion
Note - this operation is called 'import' by the API.
Args:
file_path: path to given file, derived from the entry id
file_name: name of file, including format suffix
Returns:
dict containing the results of the upload action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
response_get_form = self._http_request(
method='POST',
url_suffix='import/upload'
)
form = dict_safe_get(response_get_form, ('data', 'result', 'form'), default_return_value={})
port_url = form.get('url')
params = form.get('parameters')
if port_url is None or params is None:
raise ValueError('Failed to initiate an upload operation')
file_dict = {'file': (file_name, open(file_path, 'rb'))}
self._http_request(
method='POST',
url_suffix=None,
full_url=port_url,
files=file_dict,
empty_valid_codes=[201, 204],
return_empty_response=True,
data=params
)
        # As shown, this operation consists of two requests.
        # The data about the operation is within the first request's response,
        # so in order to keep the operation's data we return the first request's response,
        # but first we remove fields that are no longer true, such as the ones
        # indicating that the second request has not been made yet.
if response_get_form.get('data'):
response_get_form.get('data').pop('message', None)
response_get_form.get('data').pop('result', None)
return response_get_form
@logger
def convert(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
        Convert a file to the desired format, given that the file was previously uploaded to the API's server
Args:
arguments: dict containing the request arguments, should contain the fields 'task_id' and 'output_format'
Returns:
dict containing the results of the convert action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
arguments['input'] = arguments.pop('task_id')
return self._http_request(
method='POST',
url_suffix='convert',
data=arguments,
ok_codes=(200, 201, 422),
)
def check_status(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Check the status of a request sent to the API's server
Args:
arguments: dict containing the request arguments, should contain the field 'task_id'
Returns:
dict containing the results of the check status action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
task_id = arguments.get('task_id')
return self._http_request(
method='GET',
url_suffix=f'/tasks/{task_id}',
ok_codes=(200, 201, 422),
)
@logger
def download_url(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Download a converted file to a url
Note - this operation is called 'export' by the API.
Args:
arguments:
dict containing the request arguments, should contain the field 'task_id' of the desired file
Returns:
dict containing the results of the download action as returned from the API (status, task ID, etc.)
if the action was complete, the result url will be a part of this dict. If the request is pending,
one should retrieve the url via the 'check_status' command
``Dict[str, Any]``
"""
arguments['input'] = arguments.pop('task_id')
return self._http_request(
method='POST',
url_suffix='/export/url',
data=arguments,
ok_codes=(200, 201, 422),
)
@logger
def get_file_from_url(self, url: str):
"""
Call a GET http request in order to get the file data given as url
Args:
url: url containing a file
Returns:
request response, containing the data of the file
"""
        # Save the headers of this client instance.
        # The HTTP request that fetches the file data must be sent without headers;
        # passing an empty dictionary to _http_request causes it to fall back to this
        # client's headers, so temporarily clear them instead.
session_headers = self._headers
self._headers = {}
try:
results = self._http_request(
method='GET',
url_suffix=None,
full_url=url,
headers={},
resp_type='response',
)
return results.content
finally:
self._headers = session_headers
@logger
def raise_error_if_no_data(results: Dict[str, Any]):
"""
This function checks if No 'data' field was returned from the request, meaning the input was invalid
Args:
results: a dict containing the request's results
    Returns:
        raises an error if there is no 'data' field, with the matching error message returned from the server;
        if the server supplied no error message, a generic availability message is raised instead
"""
if results.get('data') is None:
if results.get('message'):
raise ValueError(results.get('message'))
else:
            raise ValueError('No response from server, the server could be temporarily unavailable or it is handling too '
                             'many requests. Please try again later.')
@logger
def upload_command(client: Client, arguments: Dict[str, Any]):
"""
Upload a file to the API for later conversion
Args:
client: CloudConvert client to use
arguments: All command arguments - either 'url' or 'entry_id'.
Returns:
CommandResults object containing the results of the upload action as returned from the API and its
readable output
"""
if arguments.get('url'):
if arguments.get('entry_id'):
raise ValueError('Both url and entry id were inserted - please insert only one.')
results = client.upload_url(arguments)
elif arguments.get('entry_id'):
demisto.debug('getting the path of the file from its entry id')
result = demisto.getFilePath(arguments.get('entry_id'))
if not result:
raise ValueError('No file was found for given entry id')
file_path, file_name = result['path'], result['name']
results = client.upload_entry_id(file_path, file_name)
else:
raise ValueError('No url or entry id specified.')
raise_error_if_no_data(results)
format_operation_title(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Upload Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
@logger
def convert_command(client: Client, arguments: Dict[str, Any]):
"""
    Convert a file that was previously uploaded
Args:
client: CloudConvert client to use
arguments: All command arguments, the fields 'task_id' and 'output_format'
Returns:
CommandResults object containing the results of the convert action as returned from the API and its readable output
"""
results = client.convert(arguments)
raise_error_if_no_data(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Convert Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
@logger
def check_status_command(client: Client, arguments: Dict[str, Any]):
"""
Check status of an existing operation using it's task id
Args:
client: CloudConvert client to use
arguments: All command arguments, the field 'task_id'
Note: When the checked operation is 'download', the field 'create_war_room_entry' should be set according
to the chosen download method, true if downloading as war room entry and false if not.
This way a war room entry containing the file will be created if needed.
Returns:
CommandResults object containing the results of the check status action as returned from the API
and its readable output OR if the argument create_war_room_entry is set to True, then a war room entry is also
being created.
"""
results = client.check_status(arguments)
raise_error_if_no_data(results)
format_operation_title(results)
results_data = results.get('data', {})
    # If checking on a download-to-entry operation, manually change the operation name
# This is because the 'download as entry' operation is our variation on the export to url operation,
# hence not distinguished as a different operation by the API
if argToBoolean(arguments.get('create_war_room_entry', False)) \
and results_data.get('operation') == 'download/url':
results['data']['operation'] = 'download/entry'
    # Check whether a download-to-war-room-entry operation has finished
    # If it has - create the entry
if results_data.get('status') == 'finished' \
and argToBoolean(arguments.get('create_war_room_entry', 'False'))\
and results_data.get('operation') == 'download/entry':
modify_results_dict(results_data)
url = results_data.get('url')
file_name = results_data.get('file_name')
file_data = client.get_file_from_url(url)
war_room_file = fileResult(filename=file_name, data=file_data, file_type=entryTypes['entryInfoFile'])
readable_output = tableToMarkdown('Check Status Results', remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids',
'file_name', 'url'),
headerTransform=string_to_table_header,)
return_results(CommandResults(
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
readable_output=readable_output,
outputs=remove_empty_elements(results_data)
))
return war_room_file
else:
modify_results_dict(results_data)
readable_output = tableToMarkdown(
'Check Status Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids', 'file_name', 'url'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
def modify_results_dict(results_data: Dict[str, Any]):
"""
    The details of the specific converted/uploaded/downloaded file are nested under
    the 'result' key, so lift them to the outer scope of the dict
Args:
results_data: the dict under the 'data' field in the response's results
"""
if results_data.get('result'):
results_info = results_data.get('result', {}).get('files')
if results_info:
results_data['file_name'] = results_info[0].get('filename')
results_data['url'] = results_info[0].get('url')
results_data['size'] = results_info[0].get('size')
@logger
def download_command(client: Client, arguments: Dict[str, Any]):
"""
Download a converted file back to the user, either as a url or directly as a war room entry
    Note: in order to get the resulting url/entry of the file you need to use a check-status command as well,
    since the download command usually responds before the file is fully downloaded (hence the
    'status' field is 'waiting', and not 'finished')
Args:
client: CloudConvert client to use
arguments: All command arguments, the fields 'task_id', and 'download_as' (url/war_room_entry)
Returns:
CommandResults object containing the results of the download action as returned from the API, and its readable
"""
# Call download as url request
# In both url and war room entry we still first get a url
results = client.download_url(arguments)
raise_error_if_no_data(results)
# If downloading as war room entry, manually change the operation name
# This is because the 'download as entry' operation is our variation on the export to url operation,
# hence not distinguished as a different operation by the API
if arguments['download_as'] == 'war_room_entry':
results['data']['operation'] = 'download/entry'
else:
format_operation_title(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Download Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
def test_module(client: Client):
"""
    Returning 'ok' indicates that the integration works as it is supposed to. Connection to the service is successful.
Args:
client: CloudConvert client
Returns:
'ok' if test passed, anything else will fail the test
"""
dummy_url = 'https://raw.githubusercontent.com/demisto/content/master/TestData/pdfworking.pdf'
result = client.upload_url({'url': dummy_url})
if result.get('data'):
return 'ok'
elif result.get('message') == "Unauthenticated.":
return 'Authorization Error: make sure API Key is correctly set'
elif result.get('message'):
return result.get('message')
else:
        return 'No response from server, the server could be temporarily unavailable or it is handling too ' \
               'many requests. Please try again later.'
def format_operation_title(results: Dict[str, Any]):
"""
This function is being used in order to change the titles of the operations that are done by the API and are
returned in the response to titles that makes more sense for the users actions, and matches the API's use in
our system.
Args:
results: The response from the http request
"""
title_exchange_dict = {
'import/url': 'upload/url',
'import/upload': 'upload/entry',
'export/url': 'download/url'}
operation = results['data']['operation']
results['data']['operation'] = title_exchange_dict[operation] if operation in title_exchange_dict.keys() \
else operation
def main() -> None:
try:
command = demisto.command()
params = demisto.params()
api_key = params.get('apikey')
verify = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(headers, verify, proxy)
if command == 'cloudconvert-upload':
return_results(upload_command(client, demisto.args()))
elif command == 'cloudconvert-convert':
return_results(convert_command(client, demisto.args()))
elif command == 'cloudconvert-check-status':
return_results(check_status_command(client, demisto.args()))
elif command == 'cloudconvert-download':
return_results(download_command(client, demisto.args()))
elif command == 'test-module':
return_results(test_module(client))
except Exception as e:
err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \
('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e))
return_error(f'Failed to execute {command} command. Error: {err_msg}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
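# A minimal end-to-end sketch of the client flow defined above (assumptions: a valid
# CloudConvert API key and the demisto runtime this module imports, so it stays
# commented):
# client = Client(headers={'Authorization': 'Bearer <API_KEY>'})
# task = client.upload_url({'url': 'https://example.com/report.pdf'})
# conv = client.convert({'task_id': task['data']['id'], 'output_format': 'txt'})
# done = client.check_status({'task_id': conv['data']['id']})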
| 37.484725 | 123 | 0.639065 |
0ab7436f0492d2dec23bf952529e99ce9b35b6d4
| 117 |
py
|
Python
|
classification/scripts/get_configsSize.py
|
s-santoro/lunch-crawler
|
1e39b1d35d76067a55b2c034d0488a6ec53f8a45
|
[
"Apache-2.0"
] | 1 |
2020-07-11T04:24:40.000Z
|
2020-07-11T04:24:40.000Z
|
classification/scripts/get_configsSize.py
|
s-santoro/lunch-crawler
|
1e39b1d35d76067a55b2c034d0488a6ec53f8a45
|
[
"Apache-2.0"
] | null | null | null |
classification/scripts/get_configsSize.py
|
s-santoro/lunch-crawler
|
1e39b1d35d76067a55b2c034d0488a6ec53f8a45
|
[
"Apache-2.0"
] | null | null | null |
import os
from configs.Configurations import Configurations
configs = Configurations().configs
print(len(configs))
| 16.714286 | 49 | 0.820513 |
077e695b9af2564011604b4c4473cecbffac1320
| 250 |
py
|
Python
|
exercises/zh/test_02_10_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/zh/test_02_10_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/zh/test_02_10_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
    assert (
        "doc1.similarity(doc2)" in __solution__ or "doc2.similarity(doc1)" in __solution__
    ), "Did you compute the similarity between the two docs?"
    assert (
        0 <= float(similarity) <= 1
    ), "The similarity score is a float. Are you sure you computed it correctly?"
    __msg__.good("Well done!")
| 27.777778 | 90 | 0.612 |
07ed2f164464d0ace36548fb4bd47260d456faeb
| 225 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_7_Datetime_Module/85. number of days between two given dates.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_7_Datetime_Module/85. number of days between two given dates.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_7_Datetime_Module/85. number of days between two given dates.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
"""
Write a function that calculates the number of days between two given dates.
Input Data:
Date1 = 2011-1-1
Date2 = 2021-1-1'
"""
import datetime
def date_diff(Date1, Date2):
delta = Date2 - Date1
return (delta)
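# Worked example with the dates from the challenge statement:
if __name__ == "__main__":
    d1 = datetime.date(2011, 1, 1)
    d2 = datetime.date(2021, 1, 1)
    print(date_diff(d1, d2))  # 3653 days: ten years including three leap days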
| 16.071429 | 76 | 0.693333 |
ed10cca5481e57a735298c518e21319ede230312
| 511 |
py
|
Python
|
hola_hercy.py
|
Hercita/POO
|
879d1e5462619579a5aec710bf3bc3ec6232de72
|
[
"CC0-1.0"
] | null | null | null |
hola_hercy.py
|
Hercita/POO
|
879d1e5462619579a5aec710bf3bc3ec6232de72
|
[
"CC0-1.0"
] | null | null | null |
hola_hercy.py
|
Hercita/POO
|
879d1e5462619579a5aec710bf3bc3ec6232de72
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Write a SCRIPT that asks the user for their name and an integer in the console, and
prints the name on separate lines as many times as the number indicates
"""
name = input("What is your name? ")  # input reads data from the console; no semicolon is needed in Python
n = input("Enter an integer: ")  # input reads data from the console
print((name + "\n") * int(n))  # \n is a line break, * repeats the string, int() makes n an integer
| 46.454545 | 110 | 0.694716 |
b9bf59b2a3598e683d8a7c3d04f091a528408841
| 1,215 |
py
|
Python
|
Models/classTemplateTF/util/model_builder.py
|
guillaumedescoteauxisabelle/compai
|
b1f4c0c7c1995233461dd3a3a73c6f6bafbf98f2
|
[
"Apache-2.0"
] | null | null | null |
Models/classTemplateTF/util/model_builder.py
|
guillaumedescoteauxisabelle/compai
|
b1f4c0c7c1995233461dd3a3a73c6f6bafbf98f2
|
[
"Apache-2.0"
] | null | null | null |
Models/classTemplateTF/util/model_builder.py
|
guillaumedescoteauxisabelle/compai
|
b1f4c0c7c1995233461dd3a3a73c6f6bafbf98f2
|
[
"Apache-2.0"
] | 1 |
2020-01-07T17:57:57.000Z
|
2020-01-07T17:57:57.000Z
|
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.applications import MobileNet
def mobilenet_transfer(class_number):
"""Return a classification model with a mobilenet backbone pretrained on ImageNet
# Arguments:
class_number: Number of classes / labels to detect
"""
    # Import the MobileNet model and discard the final 1000-neuron classification layer.
base_model = MobileNet(input_shape=(224,224,3), weights='imagenet',include_top=False, pooling='avg')
x = base_model.output
x = tf.keras.layers.Dense(1024,activation='relu')(x)
x = tf.keras.layers.Dense(1024,activation='relu')(x)
x = tf.keras.layers.Dense(512,activation='relu')(x)
# Final layer with softmax activation
preds = tf.keras.layers.Dense(class_number,activation='softmax')(x)
# Build the model
model = tf.keras.models.Model(inputs=base_model.input,outputs=preds)
# Freeze base_model
# for layer in base_model.layers: # <=> to [:86]
# layer.trainable = False
# Freeze the first 60 layers and fine-tune the rest
for layer in model.layers[:60]:
layer.trainable=False
for layer in model.layers[60:]:
layer.trainable=True
return model
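# A minimal usage sketch (assumptions: TensorFlow 2.x with internet access for the
# ImageNet weights, and a hypothetical 5-class problem; the data pipeline is omitted):
if __name__ == '__main__':
    model = mobilenet_transfer(class_number=5)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()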
| 37.96875 | 104 | 0.702881 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.