hexsha (stringlengths 40) | size (int64 6–782k) | ext (stringclasses 7 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–237) | max_stars_repo_name (stringlengths 6–72) | max_stars_repo_head_hexsha (stringlengths 40) | max_stars_repo_licenses (list) | max_stars_count (int64 1–53k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 ⌀) | max_issues_repo_path (stringlengths 4–184) | max_issues_repo_name (stringlengths 6–72) | max_issues_repo_head_hexsha (stringlengths 40) | max_issues_repo_licenses (list) | max_issues_count (int64 1–27.1k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 ⌀) | max_forks_repo_path (stringlengths 4–184) | max_forks_repo_name (stringlengths 6–72) | max_forks_repo_head_hexsha (stringlengths 40) | max_forks_repo_licenses (list) | max_forks_count (int64 1–12.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 ⌀) | content (stringlengths 6–782k) | avg_line_length (float64 2.75–664k) | max_line_length (int64 5–782k) | alphanum_fraction (float64 0–1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
21c77d5dbc21a2e76cc25360a58f05ff0f35abbd | 8,936 | py | Python | test/smallTests/test_WithoutOGS.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 1 | 2021-03-16T08:35:50.000Z | 2021-03-16T08:35:50.000Z | test/smallTests/test_WithoutOGS.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 67 | 2019-11-14T11:29:52.000Z | 2022-03-09T14:37:11.000Z | test/smallTests/test_WithoutOGS.py | mcwimm/pyMANGA | 6c7b53087e53b116bb02f91c33974f3dfd9a46de | ["MIT"] | 6 | 2019-11-12T11:11:41.000Z | 2021-08-12T13:57:22.000Z |
# This script tests pyMANGA using seven setups
# The first test only checks whether the setups can be calculated without
# errors
# The second test compares the calculated results with reference results
import sys
from os import path
import os
sys.path.append(
path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from ProjectLib import XMLtoProject
from TimeLoopLib import TreeDynamicTimeStepping
import unittest
import glob
import os
from lxml import etree
import shutil
from pathlib import Path
import pandas as pd
manga_root_directory = path.dirname(
path.dirname(path.dirname(path.abspath(__file__))))
filepath_examplesetups = path.join(path.dirname(path.abspath(__file__)),
"testSetupsWithoutOGS/*.xml")
xml = glob.glob(filepath_examplesetups)
xml.sort()
example_setups = []
errors = []
errors_compare = []
errors_empty_comparison = []
errors_empty_results = []
testlist = []
output_exist = None
separator = "/"
if xml:
for xmlfile in xml:
print("________________________________________________")
print("In the following the setup", xmlfile, "is tested.")
print("________________________________________________")
def findChild(parent, key):
child = parent.find(key)
return child
tree = etree.parse(xmlfile)
root = tree.getroot()
for tag in root.iter():
tag.text = tag.text.strip()
output = findChild(root, "tree_output")
output_type_xml_element = findChild(output, "type")
output_type = output_type_xml_element.text
if not output_type == "NONE":
output_dir_xml_element = findChild(output, "output_dir")
output_dir = path.join(manga_root_directory,
output_dir_xml_element.text)
if not os.path.exists(output_dir):
output_exist = False
os.makedirs(output_dir)
else:
output_exist = True
old_results = glob.glob(path.join(output_dir, "*.*"))
if old_results:
for result in old_results:
os.remove(result)
e, filename = os.path.split(xmlfile)
else:
errors_empty_results.append(xmlfile)
e, filename = os.path.split(xmlfile)
comparison_file_dir_in_pieces = (path.join(
path.dirname(path.abspath(__file__))), "referenceFiles", filename,
"*.*")
    comparison_file_dir = separator.join(comparison_file_dir_in_pieces)
files_comparison = glob.glob(comparison_file_dir)
example_setups.append(filename)
        class MyTest(unittest.TestCase):
            def test1(self):
                # Test that the MANGA project file loads and its time loop
                # runs without errors
                try:
                    prj = XMLtoProject(xml_project_file=xmlfile)
                    time_stepper = TreeDynamicTimeStepping(prj)
                    prj.runProject(time_stepper)
                # Store the failed setup for a clear evaluation in the report
                except Exception:
                    errors.append(xmlfile)
                    self.fail("Setup %s could not be calculated." % xmlfile)
            def test2(self):
                # Check whether a reference file for the setup is available
                if not files_comparison:
                    errors_empty_comparison.append(xmlfile)
                # If a reference file is available, it is compared with the
                # calculated results
                else:
                    files_result = glob.glob(path.join(output_dir, "*"))
                    if files_result:
                        for y in range(len(files_result)):
                            result = pd.read_csv(
                                files_result[y],
                                delimiter='\t').drop('tree', axis=1)
                            reference = pd.read_csv(
                                files_comparison[y],
                                delimiter='\t').drop('tree', axis=1)
                            if (result - reference).values.any():
                                errors_compare.append(xmlfile)
                                self.fail("Results of %s differ from the "
                                          "reference." % xmlfile)
if __name__ == "__main__":
unittest.main(exit=False)
# remove created output
if not output_type == "NONE":
if not output_exist:
                shutil.rmtree(output_dir, ignore_errors=True)
elif output_exist:
old_results = glob.glob(path.join(output_dir, "*.*"))
for result in old_results:
os.remove(result)
print("The setup", xmlfile, "was tested.")
print("________________________________________________")
print("""
The testing of all setups is finished.
print("")
________________________________________________
________________________________________________
########
#Report#
########
________________________________________________
________________________________________________
""")
if not len(example_setups) == 1:
    print("The following sample setups have been tested:")
else:
    print("The following sample setup has been tested:")
print("")
for setup in example_setups:
print("")
print(setup)
print("________________________________________________")
print("________________________________________________")
print("")
print("Result of the first test:")
print("")
if errors:
print("An error occured while testing the following setup(s):")
n = range(len(errors))
for x in n:
print("")
print(errors[x])
print("")
else:
print("The first test of all setups were successful.")
print("________________________________________________")
print("________________________________________________")
print("")
print("Result of the second test:")
print("")
if errors_empty_comparison and errors_compare:
    print('An error occurred when comparing the result of the following '
          'setup:')
for x in range(len(errors_compare)):
print("")
print(errors_compare[x])
print("")
print('It should be noted further:')
print('There are missing files for the comparison of the result '
'of the following setups:')
for x in range(len(errors_empty_comparison)):
print("")
print(errors_empty_comparison[x])
print("")
elif errors_empty_comparison:
print("There is/are missing file(s) for the comparison of the result "
"of the following setup(s):")
print("")
n = range(len(errors_empty_comparison))
for x in n:
print("")
print(errors_empty_comparison[x])
print("")
print("The comparison of the result of the other setups "
"with the comparison files was successful.")
else:
if errors_compare:
print("An error occurred when comparing the result(s) of the "
"following setup(s) with the comparison file(s):")
print("")
for x in range(len(errors_compare)):
print("")
print(errors_compare[x])
print("")
if errors_empty_results:
print("Please also note that the following sample setup(s) "
"do not save model results and therefore could not "
"be checked:")
print("")
            for setup in errors_empty_results:
                print(setup)
                print("")
else:
if errors_empty_results:
print("""The comparison of the result of the setups
with the comparison files was successful. Please
note, however, that the following sample setups do
not save model results and therefore could not be
"checked:""")
print("")
n = len(errors_empty_results)
for x in n:
print("")
print(errors_compare[x])
print("")
else:
print("The comparison of the result of the setups "
"with the comparison files was successful.")
print("________________________________________________")
print("________________________________________________")
else:
print("Unfortunately no project-file could be found.")
| 37.078838 | 79 | 0.56748 |
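The value-difference trick in test2 above works, but pandas ships an assertion helper for exactly this job. A minimal sketch (the helper function name is illustrative; column layout as in the result files above):

import pandas as pd
from pandas.testing import assert_frame_equal

def results_match(result_file, reference_file):
    # Drop the 'tree' id column, as the test above does, then compare values.
    res = pd.read_csv(result_file, delimiter='\t').drop('tree', axis=1)
    ref = pd.read_csv(reference_file, delimiter='\t').drop('tree', axis=1)
    try:
        assert_frame_equal(res, ref)
        return True
    except AssertionError:
        return False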
1d1d8fb439380425aa5583e73ef78fe6653d2868 | 1,974 | py | Python | src/bo4e/com/tarifpreispositionproort.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | 1 | 2022-03-02T12:49:44.000Z | 2022-03-02T12:49:44.000Z | src/bo4e/com/tarifpreispositionproort.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | 21 | 2022-02-04T07:38:46.000Z | 2022-03-28T14:01:53.000Z | src/bo4e/com/tarifpreispositionproort.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | null | null | null |
"""
Contains TarifpreispositionProOrt class
and corresponding marshmallow schema for de-/serialization
"""
from typing import List
import attr
from marshmallow import fields
from bo4e.com.com import COM, COMSchema
from bo4e.com.tarifpreisstaffelproort import TarifpreisstaffelProOrt, TarifpreisstaffelProOrtSchema
from bo4e.validators import check_list_length_at_least_one
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class TarifpreispositionProOrt(COM):
"""
Mit dieser Komponente können Tarifpreise verschiedener Typen abgebildet werden
.. HINT::
`TarifpreispositionProOrt JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/TarifpreispositionProOrtSchema.json>`_
"""
    # required attributes
    #: Postal code of the town for which the price applies
    postleitzahl: str = attr.ib(validator=attr.validators.matches_re(r"^\d{5}$"))
    #: Town for which the price applies
    ort: str = attr.ib(validator=attr.validators.instance_of(str))
    #: ene't grid number of the grid in which the price applies
    netznr: str = attr.ib(validator=attr.validators.instance_of(str))
    # The price tiers with their price data are defined here
preisstaffeln: List[TarifpreisstaffelProOrt] = attr.ib(
validator=[
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(TarifpreisstaffelProOrt),
iterable_validator=check_list_length_at_least_one,
),
]
)
# there are no optional attributes
class TarifpreispositionProOrtSchema(COMSchema):
"""
Schema for de-/serialization of TarifpreispositionProOrt.
"""
class_name = TarifpreispositionProOrt
# required attributes
postleitzahl = fields.Str()
ort = fields.Str()
netznr = fields.Str()
preisstaffeln = fields.List(fields.Nested(TarifpreisstaffelProOrtSchema))
| 34.631579 | 203 | 0.741135 |
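For reference, the attrs validation pattern used above, reduced to a self-contained sketch (the example class is illustrative and not part of BO4E):

import attr

@attr.s(auto_attribs=True, kw_only=True)
class OrtBeispiel:
    # Same validator combination as TarifpreispositionProOrt above.
    postleitzahl: str = attr.ib(validator=attr.validators.matches_re(r"^\d{5}$"))
    ort: str = attr.ib(validator=attr.validators.instance_of(str))

OrtBeispiel(postleitzahl="38640", ort="Goslar")  # passes both validators
# OrtBeispiel(postleitzahl="123", ort="Goslar")  # would raise a ValueError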
d592d46fb1f10de739404ea2afa4efa361c4cd96 | 2,529 | py | Python | mcsf/commands/up.py | beremaran/mcsf | 7651e849b860cf4f0517fa8e4fc4b7a417bbc3a4 | ["MIT"] | null | null | null | mcsf/commands/up.py | beremaran/mcsf | 7651e849b860cf4f0517fa8e4fc4b7a417bbc3a4 | ["MIT"] | null | null | null | mcsf/commands/up.py | beremaran/mcsf | 7651e849b860cf4f0517fa8e4fc4b7a417bbc3a4 | ["MIT"] | null | null | null |
import time
import logging
from mcsf.commands.base import Command
from mcsf.services.backup import BackupService
from mcsf.services.json_storage import JsonStorage
from mcsf.services.ssh import SshService
from mcsf.services.vultr import VultrService
class UpCommand(Command):
def __init__(self):
self.json_storage = JsonStorage()
def handle(self, args):
alias = args.alias
storage_key = 'SERVER_{}'.format(alias)
if not self.json_storage.has('SSHKEYID'):
logging.error('Please configure MCSF first.')
exit(1)
vultr = VultrService()
if self.json_storage.has(storage_key):
logging.error('This alias is in use.')
exit(2)
logging.info('Creating new server ...')
sub_id = vultr.start_new_server()
self.json_storage.set(storage_key, sub_id)
logging.info('Waiting server to get online ...')
server = {}
while True:
try:
server = vultr.get_server_info(sub_id)
time.sleep(5)
if server['main_ip'] == '0.0.0.0':
continue
except KeyError:
continue
break
logging.info('Connecting to server ...')
ssh = SshService(server['main_ip'])
backup_service = BackupService(alias, ssh)
logging.info('Installing Java Runtime Environment ...')
ssh.exec('apt-get update')
ssh.exec('apt-get install -y default-jre')
logging.info('Installing unzip ...')
ssh.exec('apt-get install -y zip unzip')
if backup_service.has_backup():
logging.info('Restoring backup ...')
backup_service.restore()
else:
logging.info('Downloading Minecraft server ...')
ssh.exec('wget https://launcher.mojang.com/v1/objects/3dc3d84a581f14691199cf6831b71ed1296a9fdf/server.jar')
logging.info('Running the server first time ...')
ssh.exec('java -Xmx1024M -Xms1024M -jar server.jar nogui')
logging.info('Accepting EULA ...')
ssh.exec("sed -i 's/false/true/g' eula.txt")
logging.info('Installation completed.')
logging.info('Starting Minecraft server ...')
ssh.exec('nohup java -Xmx1024M -Xms1024M -jar server.jar nogui &')
logging.info('Connect to server:')
logging.info('{}:{}'.format(server['main_ip'], 25565))
logging.info('Please wait while server is initializing!')
| 33.72 | 119 | 0.605773 |
98a61ea0ef056a7e141b295b3ae78569040d414d | 768 | py | Python | tournamentmasters/command_tournament_master.py | jorgeparavicini/FourWins | 1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83 | ["MIT"] | 1 | 2021-01-20T18:33:01.000Z | 2021-01-20T18:33:01.000Z | tournamentmasters/command_tournament_master.py | jorgeparavicini/FourWins | 1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83 | ["MIT"] | null | null | null | tournamentmasters/command_tournament_master.py | jorgeparavicini/FourWins | 1c5e8a23b4464ef6b71d70c9ff040aa004b9ca83 | ["MIT"] | 2 | 2019-09-04T08:27:14.000Z | 2019-09-06T20:32:30.000Z |
from bots import BaseBot
from tournamentmasters.tournament_master import TournamentMaster
class CommandTournamentMaster(TournamentMaster):
def __init__(self, bot_1: BaseBot, bot_2: BaseBot, grid_width: int, grid_height: int,
time_between_rounds: float = 0):
super(CommandTournamentMaster, self).__init__(bot_1, bot_2, grid_width, grid_height, time_between_rounds)
self.winner_id = -1
def on_turn_end(self, bot_played: BaseBot):
self.grid.print()
print("---------------------\n")
def on_winner_found(self, winner_bot: BaseBot):
print(f'{winner_bot.name} {winner_bot.id} WOOOOOOON')
self.winner_id = winner_bot.id
def play(self):
super().play()
return self.winner_id
| 33.391304 | 113 | 0.674479 |
7f33f969861ce36707f69e0d676d19ed8854e14c | 1,191 | py | Python | 2-resources/python-data-generation/generate-random-data-into-dynamodb.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null | 2-resources/python-data-generation/generate-random-data-into-dynamodb.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null | 2-resources/python-data-generation/generate-random-data-into-dynamodb.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null |
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey
from boto.regioninfo import RegionInfo
from boto.dynamodb2.layer1 import DynamoDBConnection
from faker import Factory
import uuid
import time
try:
sessions = Table(
table_name='usertable',
schema=[HashKey('id')],
connection=DynamoDBConnection(
region=RegionInfo(name='eu-west-1',
endpoint='dynamodb.eu-west-1.amazonaws.com')
))
except Exception as e:
    print("connection not successful: %s" % e)
def create_session():
id = str(uuid.uuid4())
timestamp = time.strftime("%Y%m%d%H%M%S")
ipv4 = Factory.create().ipv4()
users_id = Factory.create().slug()
users_name = Factory.create().first_name()
users_surname = Factory.create().last_name()
res = sessions.put_item(data={
        'id': id,  # key must match the table's HashKey('id')
'data': {
'user_id': users_id,
'name' : users_name,
'surname' : users_surname,
'ip': str(ipv4),
'datetime': timestamp
}
})
print('Created: ' + str(res))
if __name__ == '__main__':
for x in range(20):
create_session()
| 25.891304 | 70 | 0.61461 |
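The script above uses the legacy boto 2 API. The same put_item with today's boto3 looks roughly like this (a sketch; table name and region taken from the script, credentials assumed to be configured):

import boto3

dynamodb = boto3.resource('dynamodb', region_name='eu-west-1')
table = dynamodb.Table('usertable')
# boto3 accepts plain dicts directly; no Layer1 connection or schema object needed.
table.put_item(Item={
    'id': 'example-uuid',
    'data': {'user_id': 'slug', 'name': 'Jane', 'surname': 'Doe'},
})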
7f6acd31318f5c4bfd333a4da43cc8cda74af81a | 237 | py | Python | Python/Exercícios_Python/001_=_deixando_tudo_pronto.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | ["MIT"] | null | null | null | Python/Exercícios_Python/001_=_deixando_tudo_pronto.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | ["MIT"] | null | null | null | Python/Exercícios_Python/001_=_deixando_tudo_pronto.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""001 = Deixando tudo pronto
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GhfZy4Dql6Q-h1khGuR5Uji39RilKx9T
"""
msg = "Olá, Mundo"
print(msg)
| 21.545455 | 77 | 0.729958 |
f6ddc6d52a2697c2e00fad3667afa3431dfb6e82 | 6,648 | py | Python | plugins/tff_backend/tff_backend_plugin.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | null | null | null | plugins/tff_backend/tff_backend_plugin.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | 178 | 2017-08-02T12:58:06.000Z | 2017-12-20T15:01:12.000Z | plugins/tff_backend/tff_backend_plugin.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | 2 | 2018-01-10T10:43:12.000Z | 2018-03-18T10:42:23.000Z |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.bizz.authentication import get_current_session
from framework.plugin_loader import get_plugin, BrandingPlugin
from framework.utils.plugins import Handler, Module
from mcfw.consts import AUTHENTICATED, NOT_AUTHENTICATED
from mcfw.restapi import rest_functions, register_postcall_hook
from mcfw.rpc import parse_complex_value
from plugins.rogerthat_api.rogerthat_api_plugin import RogerthatApiPlugin
from plugins.tff_backend import rogerthat_callbacks
from plugins.tff_backend.api import investor, nodes, global_stats, users, audit, agenda, flow_statistics, \
installations, nodes_unauthenticated
from plugins.tff_backend.bizz.authentication import get_permissions_from_scopes, get_permission_strings, Roles
from plugins.tff_backend.bizz.statistics import log_restapi_call_result
from plugins.tff_backend.configuration import TffConfiguration
from plugins.tff_backend.handlers.cron import RebuildSyncedRolesHandler, UpdateGlobalStatsHandler, \
SaveNodeStatusesHandler, BackupHandler, CheckNodesOnlineHandler, ExpiredEventsHandler, RebuildFirebaseHandler, \
CheckOfflineNodesHandler, CheckStuckFlowsHandler
from plugins.tff_backend.handlers.index import IndexPageHandler
from plugins.tff_backend.handlers.testing import AgreementsTestingPageHandler
from plugins.tff_backend.handlers.update_app import UpdateAppPageHandler
from plugins.tff_backend.patch_onfido_lib import patch_onfido_lib
class TffBackendPlugin(BrandingPlugin):
def __init__(self, configuration):
super(TffBackendPlugin, self).__init__(configuration)
self.configuration = parse_complex_value(TffConfiguration, configuration, False) # type: TffConfiguration
rogerthat_api_plugin = get_plugin('rogerthat_api')
assert (isinstance(rogerthat_api_plugin, RogerthatApiPlugin))
rogerthat_api_plugin.subscribe('app.installation_progress', rogerthat_callbacks.installation_progress)
rogerthat_api_plugin.subscribe('messaging.flow_member_result', rogerthat_callbacks.flow_member_result)
rogerthat_api_plugin.subscribe('messaging.form_update', rogerthat_callbacks.form_update)
rogerthat_api_plugin.subscribe('messaging.update', rogerthat_callbacks.messaging_update)
rogerthat_api_plugin.subscribe('messaging.poke', rogerthat_callbacks.messaging_poke)
rogerthat_api_plugin.subscribe('friend.is_in_roles', rogerthat_callbacks.friend_is_in_roles)
rogerthat_api_plugin.subscribe('friend.update', rogerthat_callbacks.friend_update)
rogerthat_api_plugin.subscribe('friend.invite_result', rogerthat_callbacks.friend_invite_result)
rogerthat_api_plugin.subscribe('friend.register_result', rogerthat_callbacks.friend_register_result)
rogerthat_api_plugin.subscribe('system.api_call', rogerthat_callbacks.system_api_call)
patch_onfido_lib()
register_postcall_hook(log_restapi_call_result)
def get_handlers(self, auth):
yield Handler(url='/', handler=IndexPageHandler)
yield Handler(url='/update-app', handler=UpdateAppPageHandler)
yield Handler(url='/testing/agreements', handler=AgreementsTestingPageHandler)
authenticated_handlers = [nodes, investor, global_stats, users, audit, agenda, flow_statistics, installations]
for _module in authenticated_handlers:
for url, handler in rest_functions(_module, authentication=AUTHENTICATED):
yield Handler(url=url, handler=handler)
not_authenticated_handlers = [nodes_unauthenticated]
for _module in not_authenticated_handlers:
for url, handler in rest_functions(_module, authentication=NOT_AUTHENTICATED):
yield Handler(url=url, handler=handler)
if auth == Handler.AUTH_ADMIN:
yield Handler(url='/admin/cron/tff_backend/backup', handler=BackupHandler)
yield Handler(url='/admin/cron/tff_backend/rebuild_synced_roles', handler=RebuildSyncedRolesHandler)
yield Handler(url='/admin/cron/tff_backend/global_stats', handler=UpdateGlobalStatsHandler)
yield Handler(url='/admin/cron/tff_backend/check_nodes_online', handler=CheckNodesOnlineHandler)
yield Handler(url='/admin/cron/tff_backend/check_offline_nodes', handler=CheckOfflineNodesHandler)
yield Handler(url='/admin/cron/tff_backend/save_node_statuses', handler=SaveNodeStatusesHandler)
yield Handler(url='/admin/cron/tff_backend/events/expired', handler=ExpiredEventsHandler)
yield Handler(url='/admin/cron/tff_backend/check_stuck_flows', handler=CheckStuckFlowsHandler)
yield Handler(url='/admin/cron/tff_backend/rebuild_firebase', handler=RebuildFirebaseHandler)
def get_client_routes(self):
return ['/orders<route:.*>', '/node-orders<route:.*>', '/investment-agreements<route:.*>',
'/global-stats<route:.*>', '/users<route:.*>', '/agenda<route:.*>', '/flow-statistics<route:.*>',
'/installations<route:.*>', '/dashboard<route:.*>', '/nodes<route:.*>']
def get_modules(self):
perms = get_permissions_from_scopes(get_current_session().scopes)
is_admin = Roles.BACKEND_ADMIN in perms or Roles.BACKEND in perms
yield Module(u'tff_dashboard', [], 0)
if is_admin or Roles.BACKEND_READONLY in perms:
yield Module(u'tff_orders', [], 1)
yield Module(u'tff_global_stats', [], 3)
yield Module(u'tff_users', [], 4)
yield Module(u'tff_agenda', [], 5)
yield Module(u'tff_flow_statistics', [], 6)
yield Module(u'tff_installations', [], 7)
        for role in [Roles.BACKEND_READONLY, Roles.NODES, Roles.NODES_READONLY, Roles.NODES_ADMIN]:
if is_admin or role in perms:
yield Module(u'tff_nodes', [], 8)
break
if is_admin:
yield Module(u'tff_investment_agreements', [], 2)
def get_permissions(self):
return get_permission_strings(get_current_session().scopes)
| 60.436364 | 118 | 0.752858 |
122994747c2a13127ad78bcd1d5fd256224d5531 | 1,815 | py | Python | tests/test_aio_trampoline.py | suned/pfun | 46c460646487abfef897bd9627891f6cf7870774 | ["MIT"] | 126 | 2019-09-16T15:28:20.000Z | 2022-03-20T10:57:53.000Z | tests/test_aio_trampoline.py | suned/pfun | 46c460646487abfef897bd9627891f6cf7870774 | ["MIT"] | 54 | 2019-09-30T08:44:01.000Z | 2022-03-20T11:10:00.000Z | tests/test_aio_trampoline.py | suned/pfun | 46c460646487abfef897bd9627891f6cf7870774 | ["MIT"] | 11 | 2020-01-02T08:32:46.000Z | 2022-03-20T11:10:24.000Z |
import pytest
from hypothesis import assume, given
from pfun import compose, identity
from pfun.aio_trampoline import Done
from pfun.hypothesis_strategies import aio_trampolines, anything, unaries
from .monad_test import MonadTest
class TestTrampoline(MonadTest):
@pytest.mark.asyncio
@given(aio_trampolines(anything()))
async def test_right_identity_law(self, trampoline):
assert (await
trampoline.and_then(Done).run()) == (await trampoline.run())
@pytest.mark.asyncio
@given(anything(), unaries(aio_trampolines(anything())))
async def test_left_identity_law(self, value, f):
assert (await Done(value).and_then(f).run()) == (await f(value).run())
@pytest.mark.asyncio
@given(
aio_trampolines(anything()),
unaries(aio_trampolines(anything())),
unaries(aio_trampolines(anything()))
)
async def test_associativity_law(self, trampoline, f, g):
assert (await trampoline.and_then(f).and_then(g).run(
)) == (await trampoline.and_then(lambda x: f(x).and_then(g)).run())
@given(anything())
def test_equality(self, value):
assert Done(value) == Done(value)
@given(anything(), anything())
def test_inequality(self, first, second):
assume(first != second)
assert Done(first) != Done(second)
@pytest.mark.asyncio
@given(anything())
async def test_identity_law(self, value):
assert (await
Done(value).map(identity).run()) == (await Done(value).run())
@pytest.mark.asyncio
@given(unaries(anything()), unaries(anything()), anything())
async def test_composition_law(self, f, g, value):
h = compose(f, g)
assert (await Done(value).map(g).map(f).run()
) == (await Done(value).map(h).run())
| 33.611111 | 78 | 0.653994 |
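The three monad laws the suite above property-tests, written out for one concrete value (a sketch; requires pfun and an asyncio event loop):

import asyncio
from pfun.aio_trampoline import Done

async def demo():
    f = lambda x: Done(x + 1)
    g = lambda x: Done(x * 2)
    assert await Done(3).and_then(Done).run() == await Done(3).run()      # right identity
    assert await Done(3).and_then(f).run() == await f(3).run()            # left identity
    assert (await Done(3).and_then(f).and_then(g).run()
            == await Done(3).and_then(lambda x: f(x).and_then(g)).run())  # associativity

asyncio.run(demo())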
d617560fad487fb40c30b78a99e13b3301e8e135 | 1,088 | py | Python | python/coursera_python/MICHIGAN/WEB/week4/asss.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/MICHIGAN/WEB/week4/asss.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/MICHIGAN/WEB/week4/asss.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
# socketTest.py
import socket
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysocket.connect(('data.pr4e.org', 80))  # connect() expects a host name, not a URL
mysocket.send(b'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n')
mysocket.send(b'Host: data.pr4e.org\r\n\r\n')
# mysocket.send(b'GET http://www.pythonlearn.com/code/intro-short.txt HTTP/1.1 Host: www.pythonlearn.com Proxy-Connection: keep-alive Cache-Control: max-age=0 Upgrade-Insecure-Requests: 1 User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8 Accept-Encoding: gzip, deflate, sdch Accept-Language: zh-CN,zh;q=0.8,en;q=0.6 Cookie: __cfduid=da807f472bbfb5777530c786a56bc13491472801448 If-None-Match: W/"1d3-521e9853a392b" If-Modified-Since: Mon, 12 Oct 2015 14:55:29 GMT')
# mysocket.connect(('www.py4inf.com', 80))
# mysocket.send('GET http://www.py4inf.com/code/romeo.txt HTTP/1.0\n\n')
while True:
data = mysocket.recv(512)
if(len(data) < 1):
break
print(data)
mysocket.close()
| 51.809524 | 620 | 0.748162 |
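For comparison, the same fetch one level of abstraction higher, using the standard library's urllib instead of a raw socket:

import urllib.request

with urllib.request.urlopen('http://data.pr4e.org/intro-short.txt') as response:
    print(response.read().decode())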
4eef2b9c8a7122b31e2cf15f614f728388893bc9 | 825 | py | Python | frappe-bench/apps/erpnext/erpnext/patches/v7_2/setup_auto_close_settings.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | frappe-bench/apps/erpnext/erpnext/patches/v7_2/setup_auto_close_settings.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | null | null | null | frappe-bench/apps/erpnext/erpnext/patches/v7_2/setup_auto_close_settings.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
# update the selling settings and set the close_opportunity_after_days
frappe.reload_doc("selling", "doctype", "selling_settings")
frappe.db.set_value("Selling Settings", "Selling Settings", "close_opportunity_after_days", 15)
# Auto close Replied opportunity
frappe.db.sql("""update `tabOpportunity` set status='Closed' where status='Replied'
and date_sub(curdate(), interval 15 Day)>modified""")
# create Support Settings doctype and update close_issue_after_days
frappe.reload_doc("support", "doctype", "support_settings")
frappe.db.set_value("Support Settings", "Support Settings", "close_issue_after_days", 7)
| 45.833333 | 96 | 0.781818 |
4ef11c982f683b48e3440a58f4b6ee74ea937fe7 | 777 | py | Python | Projects/Opencv/read&write&showvideo.py | ankita080208/Hacktoberfest | 2be849e89285260e7b6672f42979943ad6bbec78 | ["MIT"] | 1 | 2021-10-06T13:55:02.000Z | 2021-10-06T13:55:02.000Z | Projects/Opencv/read&write&showvideo.py | ankita080208/Hacktoberfest | 2be849e89285260e7b6672f42979943ad6bbec78 | ["MIT"] | null | null | null | Projects/Opencv/read&write&showvideo.py | ankita080208/Hacktoberfest | 2be849e89285260e7b6672f42979943ad6bbec78 | ["MIT"] | null | null | null |
import cv2
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi',fourcc,20.0,(640,480))
# cap.isOpened() returns True if the camera is linked (or the file name is correct) and False otherwise
while cap.isOpened():
    ret, frame = cap.read()  # ret is True if a frame was read, False otherwise; frame holds the captured image
if ret:
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # used to change the colour of the image
#cv2.imshow('frame',frame)
out.write(frame)
cv2.imshow('video',frame)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
| 33.782609 | 145 | 0.657658 |
2148772a2395b86c3dfd7797db93b09c8386122d | 555 | py | Python | BITs/2014/Budashov_A_E/task_3_8.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | BITs/2014/Budashov_A_E/task_3_8.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | BITs/2014/Budashov_A_E/task_3_8.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null |
# Task N3. Variant 8
# Write a program that prints the name "Борис Николаевич Бугаев" and
# asks for his pseudonym. The program must concatenate the two strings
# and print the result, separating the name and pseudonym with a dash.
# Будашов Андрей
# 03.03.2016
print("Герой нашей сегодняшней программы-Борис Николаевич Бугаев")
psev = input("Под каким же псевдонимом он известен? Ваш ответ:")
if psev == "Андрей Белый":
    print("Все верно Борис Николаевич Бугаев -" + psev)
else:
    print("Вы ошиблись, это не его псевдоним(")
input("Press Enter to close")
| 37 | 70 | 0.767568 |
dcd89a646ed4de53ab60c6761471c5a725d4fec5 | 3,793 | py | Python | mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/VQAtrainIter.py | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | ["MIT"] | 1 | 2019-05-10T09:16:23.000Z | 2019-05-10T09:16:23.000Z | mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/VQAtrainIter.py | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | ["MIT"] | null | null | null | mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/VQAtrainIter.py | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | ["MIT"] | 1 | 2019-10-14T07:30:18.000Z | 2019-10-14T07:30:18.000Z |
import numpy as np
import mxnet as mx
import bisect
class VQAtrainIter(mx.io.DataIter):
def __init__(self, img, sentences, answer, batch_size, buckets=None, invalid_label=-1,
text_name='text', img_name = 'image', label_name='softmax_label', dtype='float32', layout='NTC'):
super(VQAtrainIter, self).__init__()
if not buckets:
buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
if j >= batch_size]
buckets.sort()
ndiscard = 0
self.data = [[] for _ in buckets]
for i in range(len(sentences)):
buck = bisect.bisect_left(buckets, len(sentences[i]))
if buck == len(buckets):
ndiscard += 1
continue
buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
buff[:len(sentences[i])] = sentences[i]
self.data[buck].append(buff)
self.data = [np.asarray(i, dtype=dtype) for i in self.data]
self.answer = answer
self.img = img
print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)
self.batch_size = batch_size
self.buckets = buckets
self.text_name = text_name
self.img_name = img_name
self.label_name = label_name
self.dtype = dtype
self.invalid_label = invalid_label
self.nd_text = []
self.nd_img = []
self.ndlabel = []
self.major_axis = layout.find('N')
self.default_bucket_key = max(buckets)
if self.major_axis == 0:
self.provide_data = [(text_name, (batch_size, self.default_bucket_key)),
(img_name, (batch_size, self.default_bucket_key))]
self.provide_label = [(label_name, (batch_size, self.default_bucket_key))]
elif self.major_axis == 1:
self.provide_data = [(text_name, (self.default_bucket_key, batch_size)),
(img_name, (self.default_bucket_key, batch_size))]
self.provide_label = [(label_name, (self.default_bucket_key, batch_size))]
else:
            raise ValueError("Invalid layout %s: must be NT (batch major) or TN (time major)" % layout)
self.idx = []
for i, buck in enumerate(self.data):
self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
self.curr_idx = 0
self.reset()
def reset(self):
self.curr_idx = 0
self.nd_text = []
self.nd_img = []
self.ndlabel = []
for buck in self.data:
            label = self.answer  # labels are the answers; same array for every bucket
self.nd_text.append(mx.ndarray.array(buck, dtype=self.dtype))
self.nd_img.append(mx.ndarray.array(self.img, dtype=self.dtype))
self.ndlabel.append(mx.ndarray.array(label, dtype=self.dtype))
def next(self):
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
img = self.nd_img[i][j:j + self.batch_size].T
text = self.nd_text[i][j:j + self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size]
else:
img = self.nd_img[i][j:j + self.batch_size]
text = self.nd_text[i][j:j + self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
data = [text, img]
return mx.io.DataBatch(data, [label],
bucket_key=self.buckets[i],
provide_data=[(self.text_name, text.shape),(self.img_name, img.shape)],
provide_label=[(self.label_name, label.shape)])
| 41.681319 | 114 | 0.569997 |
b4af6bb9768f39bbf401218d586eb9a4734af7f7 | 258 | py | Python | tests/views/test_planing.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | ["MIT"] | 1 | 2021-06-01T14:49:18.000Z | 2021-06-01T14:49:18.000Z | tests/views/test_planing.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | ["MIT"] | 286 | 2020-12-04T14:13:00.000Z | 2022-03-09T19:05:16.000Z | tests/views/test_planing.py | DanielGrams/gsevpt | a92f71694388e227e65ed1b24446246ee688d00e | ["MIT"] | null | null | null |
def test_list(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
seeder.create_event(admin_unit_id)
url = utils.get_url("planing")
utils.get_ok(url)
url = utils.get_url("planing", keyword="name")
utils.get_ok(url)
| 25.8 | 50 | 0.693798 |
2591beb5d300408e2e808c5ab6d8dfbf86491647 | 2,496 | py | Python | quant/markets/market.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 7 | 2017-10-22T15:00:09.000Z | 2019-09-19T11:45:43.000Z | quant/markets/market.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 1 | 2018-01-19T16:19:40.000Z | 2018-01-19T16:19:40.000Z | quant/markets/market.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 5 | 2017-12-11T15:10:29.000Z | 2018-12-21T17:40:58.000Z |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import time
from quant import config
class Market(object):
"""
eth_btc
base_currency :btc
quote_currency:eth
"""
def __init__(self, base_currency, market_currency, pair_code, fee_rate):
self._name = None
self.base_currency = base_currency
self.market_currency = market_currency
self.pair_code = pair_code
self.fee_rate = fee_rate
self.depth_updated = 0
self.update_rate = 1
self.is_terminated = False
self.request_timeout = 5 # 5s
self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [{'price': 0, 'amount': 0}]}
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def terminate(self):
self.is_terminated = True
def get_depth(self):
time_diff = time.time() - self.depth_updated
# logging.warn('Market: %s order book1:(%s>%s)', self.name, time_diff, self.depth_updated)
if time_diff > self.update_rate:
logging.debug('%s should update...', self.name)
if not self.ask_update_depth():
return None
time_diff = time.time() - self.depth_updated
# logging.warn('Market: %s order book2:(%s>%s)', self.name, time_diff, self.depth_updated)
if time_diff > config.market_expiration_time:
# logging.warn('Market: %s order book is expired(%s>%s)', self.name, time_diff,
# config.market_expiration_time)
return None
return self.depth
def ask_update_depth(self):
try:
self.update_depth()
# self.convert_to_usd()
self.depth_updated = time.time()
return True
except Exception as e:
logging.error("Can't update market: %s - err:%s" % (self.name, str(e)))
# log_exception(logging.DEBUG)
return False
# traceback.print_exc()
def get_ticker(self):
depth = self.get_depth()
if not depth:
return None
res = {'ask': {'price': 0, 'amount': 0}, 'bid': {'price': 0, 'amount': 0}}
if len(depth['asks']) > 0:
res['ask'] = depth['asks'][0]
if len(depth['bids']) > 0:
res['bid'] = depth['bids'][0]
return res
def update_depth(self):
"""子类重写该方法,每个market的数据不一样"""
pass
| 29.023256 | 98 | 0.563702 |
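update_depth() is the hook each exchange-specific subclass fills in. A minimal illustrative subclass with a hard-coded order book (the values are made up):

class DummyMarket(Market):
    def update_depth(self):
        # A real subclass would fetch this from an exchange API.
        self.depth = {
            'asks': [{'price': 101.0, 'amount': 2.0}],
            'bids': [{'price': 99.0, 'amount': 3.0}],
        }

market = DummyMarket('btc', 'eth', 'eth_btc', fee_rate=0.001)
market.name = 'Dummy'
print(market.get_ticker())  # {'ask': {'price': 101.0, ...}, 'bid': {'price': 99.0, ...}}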
d301a19fa423c4f3d2a1fc176b0c0b012c9f1beb | 1,091 | py | Python | Contrib-Inspur/openbmc/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | 5 | 2019-11-11T07:57:26.000Z | 2022-03-28T08:26:53.000Z | Contrib-Inspur/openbmc/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | 3 | 2019-09-05T21:47:07.000Z | 2019-09-17T18:10:45.000Z | Contrib-Inspur/openbmc/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | 11 | 2019-07-20T00:16:32.000Z | 2022-01-11T14:17:48.000Z |
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
import subprocess
import oe.lsb
class VirglTest(OERuntimeTestCase):
@OETestDepends(['ssh.SSHTest.test_ssh'])
def test_kernel_driver(self):
status, output = self.target.run('dmesg|grep virgl')
self.assertEqual(status, 0, "Checking for virgl driver in dmesg returned non-zero: %d\n%s" % (status, output))
self.assertIn("virgl 3d acceleration enabled", output, "virgl acceleration seems to be disabled:\n%s" %(output))
@OETestDepends(['virgl.VirglTest.test_kernel_driver'])
def test_kmscube(self):
distro = oe.lsb.distro_identifier()
if distro and distro == 'centos-7':
self.skipTest('kmscube is not working when centos 7 is the host OS')
status, output = self.target.run('kmscube', timeout=30)
self.assertEqual(status, 0, "kmscube exited with non-zero status %d and output:\n%s" %(status, output))
self.assertIn('renderer: "virgl"', output, "kmscube does not seem to use virgl:\n%s" %(output))
| 45.458333 | 120 | 0.698442 |
d3743a8aa5f1510dd3f669978d46692e94eb7fc2 | 459 | py | Python | Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/22.01-Binary Search.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | null | null | null | Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/22.01-Binary Search.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | null | null | null | Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/22.01-Binary Search.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | null | null | null |
def binary_search(nums, target):
low = 0
high = len(nums) - 1
while low <= high:
mid = low + ((high - low) // 2)
if nums[mid] > target:
high = mid-1
elif nums[mid] < target:
low = mid+1
else:
return mid
return -1
if __name__ == "__main__":
lst = [0, 1, 2, 5, 6, 7, 8]
print(binary_search(lst, 10))
print(binary_search(lst, 7))
print(binary_search(lst, 1))
| 21.857143 | 39 | 0.501089 |
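The standard library implements the same search; a quick cross-check against the bisect module (bisect_left returns an insertion point, so membership still has to be verified). Assumes binary_search from the file above is in scope:

import bisect

def binary_search_stdlib(nums, target):
    i = bisect.bisect_left(nums, target)
    return i if i < len(nums) and nums[i] == target else -1

lst = [0, 1, 2, 5, 6, 7, 8]
assert binary_search_stdlib(lst, 10) == binary_search(lst, 10) == -1
assert binary_search_stdlib(lst, 7) == binary_search(lst, 7) == 5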
6ccef462688d16cb92cef21492dc0fd5e4283a78 | 2,602 | py | Python | frappe-bench/env/lib/python2.7/site-packages/gocardless_pro/services/base_service.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/gocardless_pro/services/base_service.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/gocardless_pro/services/base_service.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | null | null | null |
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import re
import time
from requests import Timeout, ConnectionError
from uuid import uuid4
from .. import list_response
from ..api_response import ApiResponse
from ..errors import MalformedResponseError
class BaseService(object):
"""Base class for API service classes."""
def __init__(self, api_client, max_network_retries=3, retry_delay_in_seconds=0.5):
self._api_client = api_client
self.max_network_retries = max_network_retries
self.retry_delay_in_seconds = retry_delay_in_seconds
def _perform_request(self, method, path, params, headers=None, retry_failures=False):
if method == 'POST':
headers = self._inject_idempotency_key(headers)
if retry_failures:
for retries_left in range(self.max_network_retries-1, -1, -1):
try:
return self._attempt_request(method, path, params, headers)
except (Timeout, ConnectionError, MalformedResponseError) as err:
if retries_left > 0:
time.sleep(self.retry_delay_in_seconds)
else:
raise err
else:
return self._attempt_request(method, path, params, headers)
def _attempt_request(self, method, path, params, headers):
if method == 'GET':
return self._api_client.get(path, params=params, headers=headers)
if method == 'POST':
return self._api_client.post(path, body=params, headers=headers)
if method == 'PUT':
return self._api_client.put(path, body=params, headers=headers)
raise ValueError('Invalid method "{}"'.format(method))
def _inject_idempotency_key(self, headers):
headers = headers or {}
if 'Idempotency-Key' not in headers:
headers['Idempotency-Key'] = str(uuid4())
return headers
def _envelope_key(self):
return type(self).RESOURCE_NAME
def _resource_for(self, response):
api_response = ApiResponse(response)
data = api_response.body[self._envelope_key()]
klass = type(self).RESOURCE_CLASS
if isinstance(data, dict):
return klass(data, api_response)
else:
records = [klass(item, api_response) for item in data]
return list_response.ListResponse(records, api_response)
def _sub_url_params(self, url, params):
return re.sub(r':(\w+)', lambda match: params[match.group(1)], url)
| 35.162162 | 89 | 0.646042 |
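Concrete services only have to provide the two class attributes that BaseService reads through type(self). A minimal illustrative sketch (the resource name and class are invented; real services are generated by Crank):

class CustomersService(BaseService):
    RESOURCE_NAME = 'customers'  # envelope key expected in API responses
    RESOURCE_CLASS = dict        # stand-in for a generated resource class

# service = CustomersService(api_client)
# service._perform_request('GET', '/customers', params=None, retry_failures=True)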
2c6dd4921a2f7275b0427573adcb23e1fc586fda | 3,717 | py | Python | pyScript/custom_src/Node.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | ["MIT"] | null | null | null | pyScript/custom_src/Node.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | ["MIT"] | null | null | null | pyScript/custom_src/Node.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | ["MIT"] | null | null | null |
from PySide2.QtGui import QColor
class Node:
def __init__(self):
# general attributes
# static:
self.title = ''
        self.type = ''  # kind of extends the title with further information, e.g. 'function input node'
self.description = ''
        self.package = None  # 'built in' marks built-in nodes; any other value means the node came from an external package (important)
self.has_main_widget = False
self.main_widget_class = None
self.main_widget_pos = ''
self.design_style = 'extended' # default value just for testing
self.color = QColor(198, 154, 21) # QColor(59, 156, 217)
# dynamic: (get copied and then individually edited in NIs)
self.code = '' # only exists in pryScript for source code generation in static nodes (standard)!
self.inputs = []
self.outputs = []
# !!! inputs and outputs may be edited for input-and output nodes in VyFunction !!!
# class GetVariable_Node(Node):
# def __init__(self, parent_variable):
# super(GetVariable_Node, self).__init__()
#
# self.parent_variable = parent_variable
#
# self.title = parent_variable.vy_name
# self.type = 'get variable node'
# self.package = 'built in'
# self.description = 'returns variable'
# # TODO code of GetVariableNode
#
# output_port = NodePort()
# output_port.type = 'data'
# self.outputs.append(output_port)
#
#
class SetVariable_Node(Node):
def __init__(self):
super(SetVariable_Node, self).__init__()
self.title = 'set var'
self.type = 'set variable node'
self.package = 'built in'
self.description = 'sets the value of a variable'
exec_input_port = NodePort()
exec_input_port.type = 'exec'
self.inputs.append(exec_input_port)
var_name_data_input_port = NodePort()
var_name_data_input_port.type = 'data'
var_name_data_input_port.label = 'var'
var_name_data_input_port.widget_pos = 'besides'
self.inputs.append(var_name_data_input_port)
val_name_data_input_port = NodePort()
val_name_data_input_port.type = 'data'
val_name_data_input_port.label = 'val'
val_name_data_input_port.widget_pos = 'besides'
self.inputs.append(val_name_data_input_port)
exec_output_port = NodePort()
exec_output_port.type = 'exec'
self.outputs.append(exec_output_port)
val_output_port = NodePort()
val_output_port.type = 'data'
val_output_port.label = 'val'
self.outputs.append(val_output_port)
class GetVariable_Node(Node):
def __init__(self):
super(GetVariable_Node, self).__init__()
self.title = 'get var'
self.type = 'get variable node'
self.package = 'built in'
self.description = 'gets the value of a variable'
data_input_port = NodePort()
data_input_port.type = 'data'
data_input_port.widget_type = 'std line edit'
data_input_port.widget_pos = 'besides'
self.inputs.append(data_input_port)
data_output_port = NodePort()
data_output_port.type = 'data'
data_output_port.label = 'val'
self.outputs.append(data_output_port)
class NodePort:
# type = ''
# label = ''
def __init__(self):
# general attributes
self.type = '' # TODO: change type to _type (shadowing!)
self.label = ''
self.widget_type = 'std line edit' # only important for data inputs
self.widget_name = '' # only important for data inputs with custom programmed widgets
self.widget_pos = 'under' # " same
| 33.790909 | 117 | 0.636535 |
2c98456d30f5b46db435fa071b8c455399af76a6 | 686 | py | Python | diversos/dicionario.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | ["Apache-2.0"] | 1 | 2021-12-18T15:29:24.000Z | 2021-12-18T15:29:24.000Z | diversos/dicionario.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | ["Apache-2.0"] | null | null | null | diversos/dicionario.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | ["Apache-2.0"] | 3 | 2021-08-23T22:45:20.000Z | 2022-02-17T13:17:09.000Z |
dicionario_sites = {"Diego": "diegomariano.com"}
print(dicionario_sites['Diego'])
dicionario_sites = {"Diego": "diegomariano.com", "Google": "google.com", "Udemy": "udemy.com", "Luiz Carlin" : "luizcarlin.com.br"}
print ("-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=")
for chave in dicionario_sites:
    print(chave + " -:- " + dicionario_sites[chave])
    print(dicionario_sites[chave])
print ("-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=")
for i in dicionario_sites.items():
print(i)
print ("-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=")
for i in dicionario_sites.values():
print(i)
print ("-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=-=+=")
for i in dicionario_sites.keys():
print(i)
| 32.666667 | 131 | 0.514577 |
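items() also supports tuple unpacking, which reads more naturally than indexing back into the dict:

dicionario_sites = {"Diego": "diegomariano.com", "Google": "google.com"}
for nome, url in dicionario_sites.items():
    print(nome + " -:- " + url)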
e2db99e29e21264d7560a22a15f2efa22bf76450 | 3,167 | py | Python | hardware-check.py | thysia-zosa/hardware-check | fc5803f2fd72559da03914e7e21ff83684fab5a0 | ["MIT"] | null | null | null | hardware-check.py | thysia-zosa/hardware-check | fc5803f2fd72559da03914e7e21ff83684fab5a0 | ["MIT"] | null | null | null | hardware-check.py | thysia-zosa/hardware-check | fc5803f2fd72559da03914e7e21ff83684fab5a0 | ["MIT"] | null | null | null |
#! /usr/bin/env python3
#
# Authors: Severin Hasler, Melvin Tas, Jonas Tochtermann
# (c) 2021
from pathlib import Path
from datetime import datetime
import os.path
import requests
import yaml
import json
from pyspectator.processor import Cpu
from crontab import CronTab
import sys
# constants
CONFIG_FILE = 'config.yaml'
MAX_CPU_TEMP = 'maxCpuTemp'
CHECK_INTERVAL = 'checkInterval'
TELEGRAM_CHAT = 'telegramChatID'
TELEGRAM_API = 'telegramApiUrl'
TELEGRAM_TOKEN = 'telegramToken'
# initialize main variables
maxCpuTemp = None
checkInterval = None
telegramChatID = None
telegramToken = None
time = str(datetime.now())
log = {}
warnings = []
warningMessage = ''
codePath = str(Path(__file__).parent.absolute()) + '/'
if os.path.isfile(codePath + CONFIG_FILE):
# read config file
try:
with open(codePath + CONFIG_FILE, 'r') as yamlFile:
config = yaml.load(yamlFile, Loader=yaml.CLoader)
if MAX_CPU_TEMP in config:
maxCpuTemp = config[MAX_CPU_TEMP]
if CHECK_INTERVAL in config:
checkInterval = config[CHECK_INTERVAL]
if TELEGRAM_CHAT in config:
telegramChatID = config[TELEGRAM_CHAT]
if TELEGRAM_TOKEN in config:
telegramToken = config[TELEGRAM_TOKEN]
except BaseException as err:
print('Error:', err)
else:
sys.exit('config file missing')
# In case something went wrong, assign default values
if maxCpuTemp == None or isinstance(maxCpuTemp, float) != True:
maxCpuTemp = 80.0
if checkInterval == None or isinstance(checkInterval, int) != True:
checkInterval = 10
# In case something telegrammy is missing, abort: Programm is not runnable
if telegramChatID == None or isinstance(telegramChatID, str) != True or \
telegramToken == None or isinstance(telegramToken, str) != True:
sys.exit('telegram config missing')
# update cronjob, if the user has changed interval time
myCron = CronTab(user=True)
intTime = '*/' + str(checkInterval)
for job in myCron:
if job.comment == 'hardwareCheck' and str(job.minute) != intTime:
job.minute.every(checkInterval)
myCron.write()
# read cpu-temperature
cpu = Cpu(monitoring_latency=1)
temperature = cpu.temperature
log['cpu-temp'] = temperature
# check if cpu-temperature exceeds max
if temperature > maxCpuTemp:
warnings.append('Temperature is too high: ' + \
str(temperature) + ' (max: ' + str(maxCpuTemp) + ')')
# save data to logfile
try:
with open(codePath + 'log.json', 'r+') as logFile:
data = json.load(logFile)
data.update({time: log})
logFile.seek(0)
json.dump(data, logFile, indent=2, ensure_ascii=False)
except BaseException as err:
print('Error:', err)
# write telegram message
if len(warnings) > 0:
warnings.insert(0, 'Your Computer has occurred a problem:')
warningMessage = '\n'.join(warnings)
send_text = 'https://api.telegram.org/' + telegramToken + \
'/sendMessage?chat_id=' + telegramChatID + \
'&parse_mode=Markdown&text=' + warningMessage
try:
response = requests.get(send_text)
    except requests.exceptions.RequestException as err:
print('Error:', err)
| 30.747573 | 74 | 0.691506 |
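Building the Telegram URL by string concatenation leaves the message text unescaped. requests can do the encoding itself via params (a sketch against the same Bot API sendMessage endpoint; token and chat id are placeholders):

import requests

def send_telegram_warning(token, chat_id, text):
    # requests URL-encodes the query parameters, so newlines and spaces are safe.
    url = 'https://api.telegram.org/{}/sendMessage'.format(token)
    return requests.get(url, params={
        'chat_id': chat_id,
        'parse_mode': 'Markdown',
        'text': text,
    })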
1a4cf179953a44432def4819e4265ca455404390 | 148 | py | Python | src/training/tensorflow/convert_tfjs.py | klawr/deepmech | 61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff | ["MIT"] | 1 | 2020-04-17T12:27:06.000Z | 2020-04-17T12:27:06.000Z | src/training/tensorflow/convert_tfjs.py | klawr/deepmech | 61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff | ["MIT"] | 1 | 2022-02-27T13:13:17.000Z | 2022-02-27T13:13:17.000Z | src/training/tensorflow/convert_tfjs.py | klawr/deepmech | 61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff | ["MIT"] | null | null | null |
import tensorflowjs as tfjs
import tensorflow as tf
model = tf.keras.models.load_model("model.h5")
tfjs.converters.save_keras_model(model, "tfjs")
| 24.666667 | 47 | 0.797297 |
1a7efb1223893d6f492c200694ea59eb1db5bb6b | 1,315 | py | Python | checks/load_favicons_test.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 19 | 2018-04-20T11:03:41.000Z | 2022-01-12T20:58:56.000Z | checks/load_favicons_test.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 160 | 2018-04-05T16:12:59.000Z | 2022-03-01T13:01:27.000Z | checks/load_favicons_test.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 8 | 2018-11-05T13:07:57.000Z | 2021-06-11T11:46:43.000Z |
from pprint import pprint
import httpretty
from httpretty import httprettified
import unittest
from checks import load_favicons
from checks.config import Config
@httprettified
class TestFavicons(unittest.TestCase):
def test_favicons(self):
# This site has a favicon
url1 = 'http://example1.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url1,
body='',
adding_headers={
"Content-type": "image/x-ico",
})
# This site has no favicon
url2 = 'http://example2.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url2,
status=404,
body='Not found',
adding_headers={
"Content-type": "text/plain",
})
config = Config(urls=['http://example1.com/path/', 'http://example2.com/'])
checker = load_favicons.Checker(config=config)
result = checker.run()
pprint(result)
self.assertEqual(result, {
'http://example1.com/path/': {
'url': 'http://example1.com/favicon.ico'
}
})
| 29.886364 | 83 | 0.49962 |
64ae08055048e7cff7b1c281503df2b7128f782e | 873 | py | Python | quant/example/ex_binance.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 7 | 2017-10-22T15:00:09.000Z | 2019-09-19T11:45:43.000Z | quant/example/ex_binance.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 1 | 2018-01-19T16:19:40.000Z | 2018-01-19T16:19:40.000Z | quant/example/ex_binance.py | doubleDragon/QuantBot | 53a1d6c62ecece47bf777da0c0754430b706b7fd | ["MIT"] | 5 | 2017-12-11T15:10:29.000Z | 2018-12-21T17:40:58.000Z |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from quant.brokers.broker_factory import create_brokers
pair_code = 'Binance_ZRX_ETH'
'''test broker'''
brokers = create_brokers([pair_code])
broker = brokers[pair_code]
'''sell order'''
# amount = 10
# price = 0.0019
# order_id = broker.sell_limit(amount=amount, price=price)
# if order_id:
# print('sell order success, id = %s' % order_id)
# else:
# print('sell order failed')
'''buy order'''
'''get order 5863126'''
# order = broker.get_order(order_id=5863126)
# if order:
# print('get order success, %s' % order)
# else:
# print('get order failed')
'''cancel order 5957505'''
# order_id = 5957505
# res = broker.cancel_order(order_id=order_id)
# if res:
#     print('cancel order: %s success' % order_id)
# else:
#     print('cancel order: %s failed' % order_id)
| 22.384615 | 58 | 0.67354 |
37520517ffd89e15b6585bfcd6ef32eb6050138f | 679 | py | Python | backend/tests.py | Marcuse7/openschufa | 6a44ddca9eda0d2fde7103d4ae76d7e1ac3ee40e | ["MIT"] | 46 | 2018-05-16T05:52:29.000Z | 2021-12-10T14:52:52.000Z | backend/tests.py | algorithmwatch/2018-openschufa | 6a44ddca9eda0d2fde7103d4ae76d7e1ac3ee40e | ["MIT"] | 45 | 2018-05-10T05:56:44.000Z | 2020-09-04T18:41:48.000Z | backend/tests.py | algorithmwatch/2018-openschufa | 6a44ddca9eda0d2fde7103d4ae76d7e1ac3ee40e | ["MIT"] | 8 | 2018-05-16T05:38:46.000Z | 2020-06-29T09:00:14.000Z |
import json
from io import BytesIO
def test_ping(app):
client = app.test_client()
resp = client.get('/ping')
data = json.loads(resp.data.decode())
assert resp.status_code == 200
assert 'records' in data['message']
assert 'success' in data['status']
def test_add_user(app):
"""Ensure a new user can be added to the database."""
with app.test_client() as client:
data = {
'name': 'test',
'foo': 'bar',
'image_1': (BytesIO(b'my file contents'), "image1.jpg")
}
response = client.post('/upload', content_type='multipart/form-data', data=data)
assert response.status_code == 204
| 27.16 | 88 | 0.603829 |
b36a7b23a68d0e64fd4259f5d1fad5165833f8b4 | 75 | py | Python | testserver/blueprints/__init__.py | IfengAutomation/AutomationTestServer | b16ad0fae9cd7198a93cf2f3cd68d46fdae2dbdd | ["Apache-2.0"] | null | null | null | testserver/blueprints/__init__.py | IfengAutomation/AutomationTestServer | b16ad0fae9cd7198a93cf2f3cd68d46fdae2dbdd | ["Apache-2.0"] | null | null | null | testserver/blueprints/__init__.py | IfengAutomation/AutomationTestServer | b16ad0fae9cd7198a93cf2f3cd68d46fdae2dbdd | ["Apache-2.0"] | null | null | null |
from . import api, casemanager, users
__all__ = ["api", "casemanager", "users"]
| 18.75 | 37 | 0.733333 |
ff54c0f94664c3fe7c5610fbab4fb2b28d06536d
| 510 |
py
|
Python
|
src/python/fibonacci.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | 1 |
2020-06-10T11:34:59.000Z
|
2020-06-10T11:34:59.000Z
|
src/python/fibonacci.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | null | null | null |
src/python/fibonacci.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | 2 |
2020-05-26T06:39:04.000Z
|
2020-11-16T06:34:23.000Z
|
memMap = {}
def fibonacci(n):
    # top-down recursion with memoization in memMap
    if n not in memMap:
        if n <= 0:
            raise ValueError("Invalid input")
        elif n == 1:
            memMap[n] = 0
        elif n == 2:
            memMap[n] = 1
        else:
            memMap[n] = fibonacci(n - 1) + fibonacci(n - 2)
    return memMap[n]
def fibonacciSlow(n):
    # naive recursion, deliberately without memoization
    if n <= 0:
        raise ValueError("Invalid input")
    elif n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        return fibonacciSlow(n - 1) + fibonacciSlow(n - 2)
# n=1000 would exceed CPython's default recursion limit for the memoized
# version and never finish for the naive one, so demonstrate with n=30.
print(fibonacci(30))
print("---------------")
print(fibonacciSlow(30))
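# A minimal iterative variant for comparison (a sketch using the same
# 1-based convention as above; it avoids recursion depth limits entirely):
def fibonacciIter(n):
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
print(fibonacciIter(30))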
| 18.214286 | 51 | 0.527451 |
443a9462413497637a0060c472a67586814d927f
| 19,356 |
py
|
Python
|
tests/onegov/election_day/views/test_views_manage.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/views/test_views_manage.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/views/test_views_manage.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import date
from lxml.html import document_fromstring
from onegov.ballot import ProporzElection
from onegov.election_day.collections import ArchivedResultCollection
from onegov.election_day.layouts import ElectionLayout
from tests.onegov.election_day.common import login
from tests.onegov.election_day.common import upload_election_compound
from tests.onegov.election_day.common import upload_majorz_election
from tests.onegov.election_day.common import upload_party_results
from tests.onegov.election_day.common import upload_proporz_election
from tests.onegov.election_day.common import upload_vote
from webtest import TestApp as Client
from tests.onegov.election_day.common import DummyRequest
def test_view_login_logout(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login = client.get('/').click('Anmelden')
login.form['username'] = '[email protected]'
login.form['password'] = 'hunter1'
assert "Unbekannter Benutzername oder falsches Passwort" \
in login.form.submit()
assert 'Anmelden' in client.get('/')
login.form['password'] = 'hunter2'
homepage = login.form.submit().follow()
assert 'Sie sind angemeldet' in homepage
assert 'Abmelden' in homepage
assert 'Anmelden' not in homepage
assert 'Anmelden' in client.get('/').click('Abmelden').follow()
def test_view_manage_elections(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/elections',
expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/elections')
assert "Noch keine Wahlen erfasst" in manage
new = manage.click('Neue Wahl')
new.form['election_de'] = 'Elect a new president'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form['mandates'] = 1
manage = new.form.submit().follow()
assert "Elect a new president" in manage
edit = manage.click('Bearbeiten')
edit.form['election_de'] = 'Elect a new federal councillor'
edit.form['absolute_majority'] = None
manage = edit.form.submit().follow()
assert "Elect a new federal councillor" in manage
assert "Elect a new federal councillor" == archive.query().one().title
delete = manage.click("Löschen")
assert "Wahl löschen" in delete
assert "Elect a new federal councillor" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Wahlen erfasst" in manage
assert archive.query().count() == 0
def test_view_manage_election_compounds(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/election-compounds',
expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/election-compounds')
assert "Noch keine Verbindungen" in manage
# Add two elections
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Elect a new parliament (Region A)'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 10
new.form.submit().follow()
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Elect a new parliament (Region B)'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 5
new.form.submit().follow()
# Add a compound
new = client.get('/manage/election-compounds').click('Neue Verbindung')
new.form['election_de'] = 'Elect a new parliament'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'canton'
new.form['elections'] = ['elect-a-new-parliament-region-a']
manage = new.form.submit().follow()
assert "Elect a new parliament" in manage
edit = manage.click('Bearbeiten')
edit.form['election_de'] = 'Elect a new cantonal parliament'
edit.form['elections'] = [
'elect-a-new-parliament-region-a',
'elect-a-new-parliament-region-b'
]
manage = edit.form.submit().follow()
assert "Elect a new cantonal parliament" in manage
assert "Elect a new cantonal parliament" in [
a.title for a in archive.query()
]
delete = manage.click("Löschen")
assert "Verbindung löschen" in delete
assert "Elect a new cantonal parliament" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Verbindungen" in manage
assert archive.query().count() == 2
def test_view_manage_votes(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/votes', expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/votes')
assert "Noch keine Abstimmungen erfasst" in manage
new = manage.click('Neue Abstimmung')
new.form['vote_de'] = 'Vote for a better yesterday'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
manage = new.form.submit().follow()
assert "Vote for a better yesterday" in manage
edit = manage.click('Bearbeiten')
edit.form['vote_de'] = 'Vote for a better tomorrow'
manage = edit.form.submit().follow()
assert "Vote for a better tomorrow" in manage
assert "Vote for a better tomorrow" == archive.query().one().title
delete = manage.click("Löschen")
assert "Abstimmung löschen" in delete
assert "Vote for a better tomorrow" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Abstimmungen erfasst" in manage
assert archive.query().count() == 0
def test_upload_proporz_election(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
upload_proporz_election(client, canton='zg')
session = election_day_app.session_manager.session()
election = session.query(ProporzElection).one()
assert election.type == 'proporz'
request = DummyRequest(session, election_day_app)
layout = ElectionLayout(election, request, 'lists-panachage')
assert layout.visible
def test_view_clear_results(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
upload_majorz_election(client, canton='zg')
upload_proporz_election(client, canton='zg')
upload_election_compound(client, canton='zg')
upload_party_results(client)
upload_party_results(client, slug='elections/elections')
upload_vote(client)
    # This test currently fails for lists / panachage because
    # layout.visible is False (self.proporz is False).
marker = "<h2>Resultate</h2>"
i_marker = "<h2>Zwischenergebnisse</h2>"
urls = (
'/election/majorz-election/candidates',
'/election/majorz-election/statistics',
'/election/proporz-election/lists',
'/election/proporz-election/candidates',
'/election/proporz-election/connections',
'/election/proporz-election/party-strengths',
'/election/proporz-election/parties-panachage',
# '/election/proporz-election/lists-panachage',
'/election/proporz-election/statistics',
'/elections/elections/parties-panachage',
'/elections/elections/party-strengths',
'/vote/vote/entities'
)
    for url in urls:
        page = client.get(url)
        assert marker in page or i_marker in page, url
client.get('/election/majorz-election/clear').form.submit()
client.get('/election/proporz-election/clear').form.submit()
client.get('/elections/elections/clear').form.submit()
client.get('/vote/vote/clear').form.submit()
assert all((marker not in client.get(url) for url in urls))
def test_view_manage_upload_tokens(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
assert "Noch keine Token." in client.get('/manage/upload-tokens')
client.get('/manage/upload-tokens/create-token').form.submit()
assert "Noch keine Token." not in client.get('/manage/upload-tokens')
client.get('/manage/upload-tokens').click("Löschen").form.submit()
assert "Noch keine Token." in client.get('/manage/upload-tokens')
def test_view_manage_data_sources(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
# Votes
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_vote'
new.form['upload_type'] = 'vote'
new.form.submit().follow()
assert 'ds_vote' in client.get('/manage/sources')
# ... regenerate token
manage = client.get('/manage/sources')
token = manage.pyquery('.data_sources td')[2].text
manage = manage.click('Token neu erzeugen').form.submit().follow()
assert token not in manage
# ... manage
manage = manage.click('Verwalten', href='data-source').follow()
assert 'Noch keine Abstimmungen erfasst' in manage.click('Neue Zuordnung')
new = client.get('/manage/votes/new-vote')
new.form['vote_de'] = "vote-1"
new.form['date'] = date(2013, 1, 1)
new.form['domain'] = 'federation'
new.form.submit()
new = client.get('/manage/votes/new-vote')
new.form['vote_de'] = "vote-2"
new.form['date'] = date(2014, 1, 1)
new.form['domain'] = 'federation'
new.form.submit()
new = manage.click('Neue Zuordnung')
assert all((x in new for x in ('vote-1', 'vote-2')))
new.form['district'] = '1111'
new.form['number'] = '2222'
new.form['item'] = 'vote-1'
manage = new.form.submit().follow()
assert all((x in manage for x in ('vote-1', '1111', '2222')))
edit = manage.click('Bearbeiten')
edit.form['district'] = '3333'
edit.form['number'] = '4444'
edit.form['item'] = 'vote-2'
manage = edit.form.submit().follow()
assert all((x not in manage for x in ('vote-1', '1111', '2222')))
assert all((x in manage for x in ('vote-2', '3333', '4444')))
manage = manage.click('Löschen').form.submit().follow()
assert 'Noch keine Zuordnungen' in manage
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_vote' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
# Majorz elections
new = client.get('/manage/elections/new-election')
new.form['election_de'] = "election-majorz"
new.form['date'] = date(2013, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
new = client.get('/manage/elections/new-election')
new.form['election_de'] = "election-proporz"
new.form['date'] = date(2013, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_majorz'
new.form['upload_type'] = 'majorz'
new.form.submit().follow()
assert 'ds_majorz' in client.get('/manage/sources')
# ... manage
manage = client.get('/manage/sources')
manage = manage.click('Verwalten', href='data-source').follow()
new = manage.click('Neue Zuordnung')
assert 'election-majorz' in new
assert 'election-proporz' not in new
new.form['district'] = '4444'
new.form['number'] = '5555'
new.form['item'] = 'election-majorz'
manage = new.form.submit().follow()
assert all((x in manage for x in ('election-majorz', '4444', '5555')))
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_majorz' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
# Proporz elections
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_proporz'
new.form['upload_type'] = 'proporz'
new.form.submit().follow()
assert 'ds_proporz' in client.get('/manage/sources')
# ... manage
manage = client.get('/manage/sources')
manage = manage.click('Verwalten', href='data-source').follow()
new = manage.click('Neue Zuordnung')
assert 'election-majorz' not in new
assert 'election-proporz' in new
new.form['district'] = '6666'
new.form['number'] = '7777'
new.form['item'] = 'election-proporz'
manage = new.form.submit().follow()
assert all((x in manage for x in ('election-proporz', '6666', '7777')))
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_proporz' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
def test_reset_password(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
request_page = client.get('/auth/login').click('Passwort zurücksetzen')
assert 'Passwort zurücksetzen' in request_page
request_page.form['email'] = '[email protected]'
assert '[email protected]' in request_page.form.submit()
assert len(election_day_app.smtp.outbox) == 0
request_page.form['email'] = '[email protected]'
assert '[email protected]' in request_page.form.submit()
assert len(election_day_app.smtp.outbox) == 1
message = election_day_app.smtp.outbox[0]
message = message.get_payload(1).get_payload(decode=True)
message = message.decode('iso-8859-1')
link = list(document_fromstring(message).iterlinks())[0][2]
token = link.split('token=')[1]
reset_page = client.get(link)
assert token in reset_page.text
reset_page.form['email'] = '[email protected]'
reset_page.form['password'] = 'new_password'
reset_page = reset_page.form.submit()
assert "Ungültige Adresse oder abgelaufener Link" in reset_page
assert token in reset_page.text
reset_page.form['email'] = '[email protected]'
reset_page.form['password'] = '1234'
reset_page = reset_page.form.submit()
assert "Feld muss mindestens 8 Zeichen beinhalten" in reset_page
assert token in reset_page.text
reset_page.form['email'] = '[email protected]'
reset_page.form['password'] = 'new_password'
assert "Passwort geändert" in reset_page.form.submit()
reset_page.form['email'] = '[email protected]'
reset_page.form['password'] = 'new_password'
reset_page = reset_page.form.submit()
assert "Ungültige Adresse oder abgelaufener Link" in reset_page
login_page = client.get('/auth/login')
login_page.form['username'] = '[email protected]'
login_page.form['password'] = 'hunter2'
login_page = login_page.form.submit()
assert "Unbekannter Benutzername oder falsches Passwort" in login_page
login_page.form['username'] = '[email protected]'
login_page.form['password'] = 'new_password'
login_page = login_page.form.submit().follow()
assert "Sie sind angemeldet" in login_page
def test_view_manage_screens(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/screens', expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/screens')
assert 'Noch keine Screens' in manage
# Add two votes
new = client.get('/manage/votes').click('Neue Abstimmung')
new.form['vote_de'] = 'Einfache Vorlage'
new.form['vote_type'] = 'simple'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
new.form.submit().follow()
new = client.get('/manage/votes').click('Neue Abstimmung')
new.form['vote_de'] = 'Vorlage mit Gegenentwurf'
new.form['vote_type'] = 'complex'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
new.form.submit().follow()
# Add two elections
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Majorz Wahl'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'majorz'
new.form['domain'] = 'region'
new.form['mandates'] = 10
new.form.submit().follow()
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Proporz Wahl'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 5
new.form.submit().follow()
# Add a compound
new = client.get('/manage/election-compounds').click('Neue Verbindung')
new.form['election_de'] = 'Verbund von Wahlen'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'canton'
new.form['elections'] = ['proporz-wahl']
new.form.submit().follow()
# Add a screen
new = client.get('/manage/screens').click('Neuer Screen')
new.form['number'] = '5'
new.form['description'] = 'Mein Screen'
new.form['type'] = 'majorz_election'
new.form['majorz_election'] = 'majorz-wahl'
new.form['structure'] = '<title />'
new.form['css'] = '/* Custom CSS */'
manage = new.form.submit().follow()
assert 'Mein Screen' in manage
assert 'Majorz Wahl' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'proporz_election'
edit.form['proporz_election'] = 'proporz-wahl'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert 'Proporz Wahl' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'election_compound'
edit.form['election_compound'] = 'verbund-von-wahlen'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert 'Proporz Wahl' not in manage
assert 'Verbund von Wahlen' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'simple_vote'
edit.form['simple_vote'] = 'einfache-vorlage'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert 'Proporz Wahl' not in manage
assert 'Verbund von Wahlen' not in manage
assert 'Einfache Vorlage' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'complex_vote'
edit.form['complex_vote'] = 'vorlage-mit-gegenentwurf'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert 'Proporz Wahl' not in manage
assert 'Verbund von Wahlen' not in manage
assert 'Einfache Vorlage' not in manage
assert 'Vorlage mit Gegenentwurf' in manage
delete = manage.click('Löschen')
assert 'Screen löschen' in delete
assert 'Bearbeiten' in delete.click('Abbrechen')
manage = delete.form.submit().follow()
assert 'Noch keine Screens' in manage
| 34.938628 | 79 | 0.671936 |
2b9926939d5419688aef9a14054cb84c8273ba54
| 2,872 |
py
|
Python
|
Lab_02/gcd_fsm.py
|
SadequrRahman/advance-SoC
|
35da93adfcdb1b4ec740cb44ffc54d9c8cc7adc4
|
[
"BSD-4-Clause-UC"
] | null | null | null |
Lab_02/gcd_fsm.py
|
SadequrRahman/advance-SoC
|
35da93adfcdb1b4ec740cb44ffc54d9c8cc7adc4
|
[
"BSD-4-Clause-UC"
] | null | null | null |
Lab_02/gcd_fsm.py
|
SadequrRahman/advance-SoC
|
35da93adfcdb1b4ec740cb44ffc54d9c8cc7adc4
|
[
"BSD-4-Clause-UC"
] | null | null | null |
#
# Copyright (C) 2019 Mohammad Sadequr Rahman <[email protected]>
#
# This file is part of the Advance SoC Design Lab Solution.
#
# The SoC Design Lab Solution cannot be copied and/or distributed without the express
# permission of Mohammad Sadequr Rahman
#
# File: gcd_fsm.py
# This is a PyMTL golden-model implementation of the GCD algorithm.
#
# Inputs:
#   a   -> first operand of the gcd calculation
#   b   -> second operand of the gcd calculation
#   en  -> enables the block; after updating the a and b values,
#          assert the en signal
# Outputs:
#   out -> output of the block, contains the gcd of a and b
#   ack -> asserted high to indicate the current gcd calculation is done.
#
from pymtl3 import *
class Gcd_fsm( Component ):
def construct(s, dType ):
s.a = InPort(dType)
s.b = InPort(dType)
s.en = InPort(b1)
s.out = OutPort(dType)
s.ack = OutPort(b1)
s.ra = Wire(dType)
s.rb = Wire(dType)
s.cState = Wire(b3)
s.nState = Wire(b3)
s.S0 = b3(0)
s.S1 = b3(1)
s.S2 = b3(2)
s.S3 = b3(3)
s.S4 = b3(4)
s.S5 = b3(5)
@s.update_ff
def state_memory():
if s.reset :
s.cState <<= s.S0
else:
s.cState <<= s.nState
@s.update
def next_state_logic():
if s.cState == s.S0:
if s.en == b1(1):
s.nState = s.S1
else:
s.nState = s.S0
elif s.cState == s.S1:
s.nState = s.S2
elif s.cState == s.S2:
if s.ra < s.rb :
s.nState = s.S3
elif s.rb != dType(0):
s.nState = s.S4
else:
s.nState = s.S5
elif s.cState == s.S3:
s.nState = s.S2
elif s.cState == s.S4:
s.nState = s.S2
elif s.cState == s.S5:
if s.en == b1(0):
s.nState = s.S0
else:
s.nState = s.S5
@s.update
def output_logic():
if s.cState == s.S0:
s.ack = b1(0)
elif s.cState == s.S1:
s.ra = s.a
s.rb = s.b
# elif s.cState == s.S2:
# pass
            elif s.cState == s.S3:
                # swap ra and rb using add/subtract, without a temporary register
                s.ra = s.ra + s.rb
                s.rb = s.ra - s.rb
                s.ra = s.ra - s.rb
elif s.cState == s.S4:
s.ra = s.ra - s.rb
elif s.cState == s.S5:
s.out = s.ra
s.ack = b1(1)
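# Behavioral reference for the FSM above (a plain-Python sketch, not part of
# the hardware model, illustrating the subtraction-based gcd that states
# S2-S4 implement):
def gcd_ref(a, b):
    while True:
        if a < b:
            a, b = b, a        # state S3: swap ra and rb
        elif b != 0:
            a = a - b          # state S4: subtract
        else:
            return a           # state S5: done, out = ra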
| 24.758621 | 82 | 0.405641 |
5b64bc81fb6aabc52e204679478b58d2ab1aab95
| 20,126 |
py
|
Python
|
dialogs/top_level_dialog.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | null | null | null |
dialogs/top_level_dialog.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | null | null | null |
dialogs/top_level_dialog.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | 2 |
2020-03-22T11:38:54.000Z
|
2020-03-24T11:11:56.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import base64
from datetime import date, time
from botbuilder.core import MessageFactory
from botbuilder.dialogs import (
WaterfallDialog,
DialogTurnResult,
WaterfallStepContext,
ComponentDialog,
ConfirmPrompt, Choice, ChoicePrompt, ChoiceFactory, FindChoicesOptions, ListStyle, DialogTurnStatus)
from botbuilder.dialogs.prompts import PromptOptions, TextPrompt, NumberPrompt
from data_models import UserProfile
from data_models import PersonalData
from dialogs.contact_to_infected import ContactsSelectionDialog
from dialogs.symptoms_selection_dialog import SymptomsSelectionDialog
from dialogs.riskcountry_selection_dialog import RiskCountrySelectionDialog
from dialogs.personaldata import PersonalDataDialog
class TopLevelDialog(ComponentDialog):
def __init__(self, dialog_id: str = None):
super(TopLevelDialog, self).__init__(dialog_id or TopLevelDialog.__name__)
# Key name to store this dialogs state info in the StepContext
self.USER_INFO = "value-userInfo"
self.add_dialog(TextPrompt(TextPrompt.__name__))
self.add_dialog(NumberPrompt(NumberPrompt.__name__))
choice = ChoicePrompt(ChoicePrompt.__name__)
choice.recognizer_options = FindChoicesOptions(allow_partial_matches=True)
self.add_dialog(choice)
self.add_dialog(SymptomsSelectionDialog(SymptomsSelectionDialog.__name__))
self.add_dialog(ContactsSelectionDialog(ContactsSelectionDialog.__name__))
self.add_dialog(PersonalDataDialog(PersonalDataDialog.__name__))
self.add_dialog(RiskCountrySelectionDialog(RiskCountrySelectionDialog.__name__))
self.add_dialog(
WaterfallDialog(
"WFDialog",
[
self.name_step,
self.age_step,
self.confirm_riskcountry_step,
self.start_riskcountry_selection_step,
self.start_symptom_selection_step,
self.temparature_step,
self.start_contacts_step,
self.job_claim_step,
self.job_type_step,
self.personal_data_step,
self.acknowledgement_step,
],
)
)
self.initial_dialog_id = "WFDialog"
async def name_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
# Create an object in which to collect the user's information within the dialog.
step_context.values[self.USER_INFO] = UserProfile()
# Ask the user to enter their name.
prompt_options = PromptOptions(
prompt=MessageFactory.text("Wie heißen Sie denn?")
)
return await step_context.prompt(TextPrompt.__name__, prompt_options)
async def age_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
# Set the user's name to what they entered in response to the name prompt.
user_profile = step_context.values[self.USER_INFO]
user_profile.name = step_context.result
# Ask the user to enter their age.
prompt_options = PromptOptions(
prompt=MessageFactory.text("Hallo " + user_profile.name + "! Wie alt sind Sie?"),
retry_prompt=MessageFactory.text("Bitte geben Sie Ihr Alter als Zahl an.")
)
return await step_context.prompt(NumberPrompt.__name__, prompt_options)
async def confirm_riskcountry_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
user_profile: UserProfile = step_context.values[self.USER_INFO]
user_profile.age = int(step_context.result)
prompt_options = PromptOptions(
choices = [Choice("Ja"), Choice("Nein")],
prompt = MessageFactory.text("Waren Sie dieses Jahr bereits im Ausland?")
)
return await step_context.begin_dialog(ChoicePrompt.__name__, prompt_options)
async def start_riskcountry_selection_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
print("[DEBUG] Received by German choice prompt: " + step_context.result.value)
riskcountry_true = step_context.result.value == "Ja"
if not riskcountry_true:
print("[DEBUG] Skipping risk country selection")
return await step_context.next([[],[]])
else:
print("[DEBUG] Entering risk country selection")
return await step_context.begin_dialog(RiskCountrySelectionDialog.__name__)
async def start_symptom_selection_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
# Set the user's age to what they entered in response to the age prompt.
print("[DEBUG] Arrived in symptom selection")
print("[DEBUG] Risk countries dialog result is " + str(step_context.result))
user_profile: UserProfile = step_context.values[self.USER_INFO]
user_profile.risk_countries = step_context.result[0]
user_profile.risk_country_returndates = step_context.result[1]
if user_profile.risk_countries is not None and len(user_profile.risk_countries) > 0:
            for single_date in user_profile.risk_country_returndates:
                print("[DEBUG] Looking at return date " + single_date)
                # compare calendar dates so the 14-day window also works across
                # month boundaries (the date strings are ISO formatted)
                days_since_return = (date.today() - date.fromisoformat(single_date)).days
                print("[DEBUG] Days since return: " + str(days_since_return))
                if days_since_return <= 14:
                    print("[DEBUG] Set risk country bool to True")
                    user_profile.risk_countries_bool = True
print("[DEBUG] Risk countries and returndates are\n" + str(user_profile.risk_countries) + "\n" + str(user_profile.risk_country_returndates))
# Otherwise, start the review selection dialog.
return await step_context.begin_dialog(SymptomsSelectionDialog.__name__)
async def temparature_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
# Set the user's name to what they entered in response to the name prompt.
user_profile: UserProfile = step_context.values[self.USER_INFO]
user_profile.symptoms = step_context.result[0]
user_profile.symptoms_dates = step_context.result[1]
print("[DEBUG] Symptoms are " + str(user_profile.symptoms))
print("[DEBUG] Corresponding dates are " + str(user_profile.symptoms))
if user_profile.symptoms is not None and len(user_profile.symptoms) > 0 and (any(user_profile.symptoms) is x for x in ['Husten', 'Lungenentzündung', 'Fieber']):
print("[DEBUG] Setting critical symtoms bool to true with symptoms " + str(user_profile.symptoms))
user_profile.critical_symptoms_bool = True
if "Fieber" in user_profile.symptoms:
prompt_options = PromptOptions(
prompt=MessageFactory.text("Wie hoch ist Ihr Fieber in Grad Celsius (°C)?")
)
return await step_context.prompt(TextPrompt.__name__, prompt_options)
else:
print("[DEBUG] Skipping fever temparature input")
return await step_context.next("0")
async def start_contacts_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
# Set the user's age to what they entered in response to the age prompt.
user_profile: UserProfile = step_context.values[self.USER_INFO]
user_profile.fever_temp = float(step_context.result.replace(",", "."))
        # Start the contacts dialog.
return await step_context.begin_dialog(ContactsSelectionDialog.__name__)
async def job_claim_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
user_profile: UserProfile = step_context.values[self.USER_INFO]
# Storing contacts and setting bools
contact_dates = step_context.result
user_profile.contact_risk_1_date = contact_dates[0]
user_profile.contact_risk_2_date = contact_dates[1]
print("[DEBUG] Current date " + date.today().strftime("%Y%m%d"))
        # compare calendar dates so the 14-day window also works across month
        # boundaries (the date strings are ISO formatted)
        if contact_dates[0] is not None:
            print("[DEBUG] " + contact_dates[0])
            days_since_contact_1 = (date.today() - date.fromisoformat(user_profile.contact_risk_1_date)).days
            print("[DEBUG] Days since risk contact 1: " + str(days_since_contact_1))
            if days_since_contact_1 <= 14:
                user_profile.contact_risk_1_bool = True
        if contact_dates[1] is not None:
            print("[DEBUG] " + contact_dates[1])
            days_since_contact_2 = (date.today() - date.fromisoformat(user_profile.contact_risk_2_date)).days
            print("[DEBUG] Days since risk contact 2: " + str(days_since_contact_2))
            if days_since_contact_2 <= 14:
                user_profile.contact_risk_2_bool = True
return await step_context.begin_dialog(ChoicePrompt.__name__, PromptOptions(
prompt=MessageFactory.text("Arbeiten Sie in einem systemkritischen Bereich?"),
choices=[Choice("Ja"), Choice("Nein")]
))
async def job_type_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
if step_context.result.value == "Ja":
print("[DEBUG] Recognized system cricital job claim")
return await step_context.begin_dialog(ChoicePrompt.__name__, PromptOptions(
prompt=MessageFactory.text("Zu welcher systemkritischen Gruppe gehören Sie?"),
choices=["Polizei", "Feuerwehr", "RichterIn", "Staatsanwälte", "Justizvollzug", "Rettungsdienst", "THW",
"Katastrophenschutz", "Mediziner", "Pfleger", "Apotheher", "**Keine**"],
style=ListStyle.list_style
))
else:
return await step_context.next(Choice("**Keine**"))
async def personal_data_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
# Set the user's company selection to what they entered in the review-selection dialog.
user_profile: UserProfile = step_context.values[self.USER_INFO]
if step_context.result.value != "**Keine**":
user_profile.critical_job = step_context.result.value
# If the user was in contact with a confirmed case in the past 14 days, he needs to add his personal data and contact the GA
if user_profile.contact_risk_1_bool is True:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Da Sie als Kontaktperson der Kategorie 1 eingestuft werden, **melden Sie sich bitte sofort bei Ihrem zuständigen Gesundheitsamt**. Außerdem bitten wir Sie noch einige persönliche Daten für die Übermittlung an das Gesundheitsamt bereitzustellen. **Überwachen Sie bitte zudem Ihre Symptome**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# Start the personal data dialog.
return await step_context.begin_dialog(PersonalDataDialog.__name__)
if user_profile.risk_countries_bool is True and user_profile.critical_symptoms_bool is True:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Da Sie sich in den letzten 14 Tagen in einer Risikoregion aufgehalten haben und für Covid-19-typische Symptome zeigen, **melden Sie sich bitte bei Ihrem zuständigen Gesundheitsamt**. Außerdem bitten wir Sie noch einige persönliche Daten für die Übermittlung an das Gesundheitsamt bereitzustellen. **Überwachen Sie bitte zudem Ihre Symptome**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# Start the personal data dialog.
return await step_context.begin_dialog(PersonalDataDialog.__name__)
if user_profile.contact_risk_2_bool is True:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Bitte warten Sie ab, ob sich Ihre Kontaktperson als bestätigter Fall herausstellt. Sollte sich der Fall bestätigen, melden Sie sich bitte bei Ihrem zuständigen Gesundheitsamt. Für diesen Fall bitten wir Sie noch einige persönliche Daten für die Übermittlung an das Gesundheitsamt bereitzustellen. **Überwachen Sie zudem bitte Ihre Symptome**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# Start the personal data dialog.
return await step_context.begin_dialog(PersonalDataDialog.__name__)
        # critical_job holds the job name (a string), so test for truthiness
        # rather than identity with True
        if user_profile.critical_symptoms_bool is True and user_profile.critical_job:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Sie gelten nicht als Kontaktperson, arbeiten jedoch in einem systemkritischen Beruf. Bitte **melden Sie sich bei Ihrem zuständigen Gesundheitsamt**. Außerdem bitten wir Sie noch einige persönliche Daten für die Übermittlung an das Gesundheitsamt bereitzustellen. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# Start the personal data dialog.
return await step_context.begin_dialog(PersonalDataDialog.__name__)
if user_profile.risk_countries_bool is True:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Da Sie sich in den letzten 14 Tagen in einer Risikoregion aufgehalten haben, **überwachen Sie bitte ob Sie Covid-19 typische Symptome entwickeln**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
            # No personal data required. Return empty personal data.
return await step_context.next(PersonalData())
if user_profile.critical_symptoms_bool is True and user_profile.age > 59:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Sie gelten nicht als Kontaktperson, gehören jedoch zu einer erhöhten Risikogruppe. Bitte **überwachen Sie Ihre Symptome**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# No personal data required. Return empty personal data.
return await step_context.next(PersonalData())
if user_profile.critical_symptoms_bool is True:
            # Advise the user on next steps.
await step_context.context.send_activity(
MessageFactory.text(
f"Sie gelten nicht als Kontaktperson. Bitte **überwachen Sie Ihre Symptome**, **verlassen Sie Ihre Wohnung so wenig wie möglich** und **reduzieren Sie Ihren Kontakt zu anderen Personen auf das Nötigste**. Empfehlungen zu Ihrem weiteren Handeln finden Sie auf den Seiten des Robert Koch-Instituts (rki.de)")
)
# No personal data required. Return empty personal data.
return await step_context.next(PersonalData())
# No personal data required. Return empty personal data.
else:
return await step_context.next(PersonalData())
async def acknowledgement_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
# Set the user's personal data to what they entered in the personal data dialog.
user_profile: UserProfile = step_context.values[self.USER_INFO]
        user_profile.personal_data = None  # TODO: check whether this reset is necessary
user_profile.personal_data = step_context.result
#time.sleep(1)
# Thank them for participating.
await step_context.context.send_activity(
MessageFactory.text(f"Danke für Ihre Mithilfe und das Beantworten der Fragen, {user_profile.name}. Bitte halten Sie sich an die aktuell geltenden Regelungen und Empfehlungen der Behörden und des Robert Koch-Instituts (rki.de).")
)
#time.sleep(1)
await step_context.context.send_activity(
MessageFactory.text(f"Bei weiterer Kommunikation mit Behörden können Sie folgende Zusammenfassung anhängen und sparen "
f"sich lästige erneute Nachfragen.")
)
ausgabe = "**Wichtige Daten für Ihr Gesundheitsamt**\n\n"
#ausgabe = "Ihre Angaben:"
try:
ausgabe += "\n\nName, Vorname: " + user_profile.personal_data.family_name + ", " + user_profile.personal_data.first_name
ausgabe += "\n\nGeburtsdatum: " + user_profile.personal_data.birthday
ausgabe += "\n\nGeschlecht: " + user_profile.personal_data.gender
ausgabe += "\n\nAdresse: " + user_profile.personal_data.street + ", " + user_profile.personal_data.zipcode + " " + user_profile.personal_data.city
ausgabe += "\n\nTelefonnr.: " + user_profile.personal_data.telephone
ausgabe += "\n\nEmail: " + user_profile.personal_data.email
except:
print("[DEBUG] no personal_data")
take_out = ""
take_out += "\n\nSymptome: "
if (len(user_profile.symptoms) > 0):
for i in range(0,len(user_profile.symptoms)):
take_out += user_profile.symptoms[i] + " seit " + user_profile.symptoms_dates[i] + ", "#
take_out = take_out[0:len(take_out)-2]
else:
take_out += "keine"
if (user_profile.fever_temp != 0.0):
take_out += "\n\nFiebertemperatur: " + str(user_profile.fever_temp).replace(".", ",") + "°C"
take_out += "\n\nBesuchte Risikogebiete: "
if (user_profile.risk_countries_bool):
for i in range(0, len(user_profile.risk_countries)):
take_out += user_profile.risk_countries[i] + " bis " + user_profile.risk_country_returndates[i] + ", "
take_out = take_out[0:len(take_out)-2]
else:
take_out += "keine"
ausgabe += take_out
ausgabe += "\n\nKontakt mit infizierter Person: "
if user_profile.contact_risk_1_date is not None:
ausgabe += "ja, am " + str(user_profile.contact_risk_1_date)
else:
ausgabe += "nein"
ausgabe += "\n\nKontakt mit Verdachtsperson: "
if user_profile.contact_risk_2_date is not None:
ausgabe += "ja, am " + str(user_profile.contact_risk_2_date)
else:
ausgabe += "nein"
ausgabe += "\n\nFunktionsträger: "
if user_profile.critical_job is not None:
ausgabe += user_profile.critical_job
else:
ausgabe += "nein"
#time.sleep(1)
await step_context.context.send_activity(
MessageFactory.text(ausgabe)
)
print("[DEBUG] Final user object created:\n" + str(user_profile.__dict__))
# Exit the dialog, returning the collected user information.
return await step_context.end_dialog(user_profile)
| 54.690217 | 593 | 0.674898 |
7541d60d211572e9b3ba5a1c8b51f97ba28e9286
| 686 |
py
|
Python
|
backend/api/migrations/0007_auto_20210930_1352.py
|
giacomooo/CASFEE_Project2
|
420ff488d6b9deefe6623a45ecfed299f97a4639
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0007_auto_20210930_1352.py
|
giacomooo/CASFEE_Project2
|
420ff488d6b9deefe6623a45ecfed299f97a4639
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0007_auto_20210930_1352.py
|
giacomooo/CASFEE_Project2
|
420ff488d6b9deefe6623a45ecfed299f97a4639
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-09-30 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_reservation_iscanceled'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='Amount',
field=models.DecimalField(decimal_places=2, default=10, max_digits=7),
preserve_default=False,
),
migrations.AddField(
model_name='reservation',
name='PricePerHour',
field=models.DecimalField(decimal_places=2, default=3, max_digits=5),
preserve_default=False,
),
]
| 26.384615 | 82 | 0.603499 |
3416327fb0f9f3461b62f07b8caef06cd7307131
| 585 |
py
|
Python
|
python/image_processing/closing.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/image_processing/closing.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/image_processing/closing.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import cv2
import numpy as np
img = cv2.imread('closing.png',0)
kernel = np.ones((5,5),np.uint8)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
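# Closing is dilation followed by erosion; a small sketch checking that
# equivalence with the basic operators (cv2.dilate / cv2.erode):
closed_manual = cv2.erode(cv2.dilate(img, kernel), kernel)
print("closing == dilate-then-erode:", np.array_equal(closing, closed_manual))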
cv2.imshow('blackhat', blackhat)
cv2.imshow('original', img)
cv2.imshow('closing', closing)
cv2.imshow('tophat', tophat)
cv2.imshow('gradient', gradient)
cv2.waitKey(0)
# to save the image
# cv2.imwrite('image1.png',img)
cv2.destroyAllWindows()
| 24.375 | 60 | 0.753846 |
caa3e773801e8aa2ce7d83c2194444a7b7c51d08
| 1,146 |
py
|
Python
|
exercise00/start.py
|
tschibu/hslu-ipcv-exercises
|
2d42c2fddfecee4f7694ade378cce22a058bc8ec
|
[
"MIT"
] | 1 |
2020-07-16T06:23:10.000Z
|
2020-07-16T06:23:10.000Z
|
exercise00/start.py
|
tschibu/hslu-ipcv-exercises
|
2d42c2fddfecee4f7694ade378cce22a058bc8ec
|
[
"MIT"
] | null | null | null |
exercise00/start.py
|
tschibu/hslu-ipcv-exercises
|
2d42c2fddfecee4f7694ade378cce22a058bc8ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/python3
"""
"""
# =============================================================================
# Imports
# =============================================================================
import cv2
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
# Matplot-Params
# Change size from Plots
plt.rcParams['font.size'] = 6
plt.rcParams['figure.dpi']= 100
plt.rcParams['lines.linewidth']= 1
# read img file
image = cv2.imread("data/lena_std.tiff")
# plot image
plt.imshow(image)
plt.show()
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.show()
print(image_rgb[0, 0]) # RGB value at pixel (0,0)
print(image_rgb[0, 0, 0]) # Red value (same pixel)
# y=250:280, x=250:360
image_rgb[250:280, 250:360] = [255, 255, 255]
plt.imshow(image_rgb)
plt.show()
# Note: despite its name, image_bw is an RGB conversion (same as image_rgb above)
image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# bw
plt.subplot(1, 2, 1)
plt.imshow(image_bw)
plt.subplot(1, 2, 2)
plt.imshow(image_rgb)
plt.show()
# gray
plt.subplot(1, 2, 1)
plt.imshow(image_gray, 'gray')
plt.subplot(1, 2, 2)
plt.imshow(image_rgb)
plt.show()
| 20.836364 | 79 | 0.608202 |
1ba3d2025d7b7ce0fc0f3a74d63ef0e4894010e3
| 5,592 |
py
|
Python
|
api/linkCreationHelpers.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 2 |
2021-03-23T20:32:38.000Z
|
2021-04-21T11:20:12.000Z
|
api/linkCreationHelpers.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 4 |
2021-04-19T11:00:55.000Z
|
2021-04-20T08:21:48.000Z
|
api/linkCreationHelpers.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Corona-Info-App
#
# © 2020 Tobias Höpp.
# Include utilities
import urllib
import json
from sqlalchemy import or_
import bs4
import visvalingamwyatt as vw
# Include db connection
from main import db, api
# Include models
from models.districts import districts, updateDistrictIncidence, createRegionIfNotExists
from models.measures import sources, regionHasGroup, display, createSource
from utils.measure_utils import createDefaultGroup
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
import requests
#For multithreading
import multiprocessing
import threading
from queue import Queue
def part1():
with open('landkreise.json') as f:
data = json.load(f)
result = {
"ok" : [],
"err" : []
}
for d in data:
region_id = createRegionIfNotExists(d["Bundesland"]).id
print(region_id)
html_soup = bs4.BeautifulSoup(d["Regionale Einschränkungen"], 'html.parser')
for l in html_soup.findAll('a'):
category = None
name = None
if l.text[0:10] == "Landkreis ":
category = "Landkreis"
name = l.text[10:]
elif l.text[-10:] == " Landkreis":
category = "Landkreis"
name = l.text[:-11]
elif l.text[0:11] == "Stadtkreis ":
category = "Stadtkreis"
name = l.text[11:]
elif l.text[0:17] == "Kreisfreie Stadt ":
category = "Kreisfreie Stadt"
name = l.text[17:]
elif l.text[-17:] == " kreisfreie Stadt":
category = "Kreisfreie Stadt"
name = l.text[:-18]
elif l.text[0:6] == "Stadt ":
category = "Kreisfreie Stadt"
name = l.text[6:]
elif l.text[0:6] == "Kreis ":
category = "Landkreis"
name = l.text[6:]
elif not "RKI" in l.text:
name = l.text
if name != None:
try:
if category != None:
if category == "Landkreis":
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id, or_(districts.category == "Landkreis", districts.category == "Kreis")).one()
else:
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id, districts.category == category).one()
else:
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id).one()
result["ok"].append({"id": d.id, "link": l["href"], "comment": l.text})
except NoResultFound:
result["err"].append({"id": None, "link": l["href"], "comment": l.text})
except MultipleResultsFound:
result["err"].append({"id": None, "link": l["href"], "comment": l.text})
with open('districtlinks.json', 'w') as json_file:
json.dump(result, json_file)
def part2():
with open('links.json') as f:
data = json.load(f)
abgedeckt = {}
for d in data:
abgedeckt[d["id"]] = d
result = {
"ok" : data,
"missing" : []
}
for d in districts.query.all():
if d.id not in abgedeckt:
result["missing"].append({"id": d.id, "link": "", "comment": d.name_de})
print(d.id)
#with open('districtlinks2.json', 'w') as json_file:
# json.dump(result, json_file)
def part3():
with open('links.json') as f:
data = json.load(f)
jobQueue = Queue()
resultQueue = Queue()
for d in data:
jobQueue.put(d)
for i in range(multiprocessing.cpu_count()):
worker = threading.Thread(target=part3_helper, args=(jobQueue,resultQueue))
worker.start()
jobQueue.join()
print("DONE")
result = []
for q_item in resultQueue.queue:
result.append(q_item)
with open('unsuccessfull.json', 'w') as json_file:
json.dump(result, json_file)
def part3_helper(q, resultQueue):
while not q.empty():
job = q.get()
try:
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0"}
r = requests.get(job["link"], timeout=(5, 10), headers=headers)
            if r.status_code != 200:
                res = job
                res["statusCode"] = r.status_code
                print(res)
                resultQueue.put(res)
        except requests.exceptions.RequestException as e:
            res = job
            res["exception"] = str(e)
            print(res)
            resultQueue.put(res)
q.task_done()
#part3()
import os
def tiles():
from pathlib import Path
jobQueue = Queue()
files = list(Path("../app/src/static/tiles").rglob("*.png"))
for f in files:
jobQueue.put(str(f))
for i in range(multiprocessing.cpu_count()):
worker = threading.Thread(target=tile_helper, args=(jobQueue,))
worker.start()
jobQueue.join()
print("DONE")
def tile_helper(q):
while not q.empty():
job = q.get()
try:
os.system("convert "+job+" -quality 85 "+job[:-3]+"jpg")
os.system("rm "+job)
        except Exception:  # keep the worker alive even if the conversion fails
print("Something went wrong:", job)
q.task_done()
tiles()
| 32.323699 | 207 | 0.54721 |
1bcdd8f54f3556ddf7d2cfe219e933a999018f43
| 762 |
py
|
Python
|
v602/python/vorbereitung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 2 |
2019-12-10T10:25:11.000Z
|
2021-01-26T13:59:40.000Z
|
v602/python/vorbereitung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | null | null | null |
v602/python/vorbereitung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 1 |
2020-12-06T21:24:58.000Z
|
2020-12-06T21:24:58.000Z
|
import numpy as np
import scipy.constants as const
planckh = const.Planck
cspeed = const.speed_of_light
charge = const.elementary_charge
d = 201.4*10**(-12)
ordnung = np.array([29, 29, 30, 32, 35, 37, 38, 40, 41])
ek = np.array([8.048, 8.905, 9.673 ,11.115 ,13.483 ,15.202 ,16.106 ,17.997 ,18.985])
ek = ek * charge * 10**3
rhyd = 13.6
anzahl = 9
theta = np.ones(anzahl)
sigma = np.ones(anzahl)
def ftheta(f):
return np.arcsin((planckh*cspeed)/(2*d*f))*180/np.pi
def fsigma(f, z):
return (z-np.sqrt(f/(rhyd*charge)))
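# ftheta follows from the Bragg condition 2*d*sin(theta) = lambda (first
# order) with lambda = h*c/E_K; fsigma solves the simplified Moseley law
# E_K = 13.6 eV * (Z - sigma)**2 for the screening constant sigma.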
for i in range(anzahl):
theta[i] = ftheta(ek[i])
sigma[i] = fsigma(ek[i], ordnung[i])
np.savetxt('build/vorbereitung.txt', np.column_stack([ordnung, ek/charge, theta, sigma]),
header='ordnung, ek, theta, sigma')
| 27.214286 | 89 | 0.650919 |
ca052855dae94ae49b901206ffa68288e3011ad6
| 300 |
py
|
Python
|
pacman-arch/test/pacman/tests/upgrade074.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade074.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade074.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "pkg2<2.0 dependency (satisfy)"
p = pmpkg("pkg1")
p.depends = ["pkg2<2.0"]
self.addpkg(p)
lp = pmpkg("pkg2", "1.9b-3")
self.addpkg2db("local", lp)
self.args = "-U %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
| 20 | 50 | 0.67 |
ca4d1cd6ab8853ca002a1ce2d49507b3dfc70323
| 339 |
py
|
Python
|
exercises/zh/solution_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/zh/solution_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/zh/solution_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.zh import Chinese
from spacy.tokens import Token
nlp = Chinese()
# Register the token extension attribute "is_country" with a default value of False
Token.set_extension("is_country", default=False)
# Process the text and set the is_country attribute of the token "新加坡" (Singapore) to True
doc = nlp("我住在新加坡。")
doc[3]._.is_country = True
# Print the token text and the is_country attribute for all tokens
print([(token.text, token._.is_country) for token in doc])
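# A related sketch: extensions can also be computed on the fly via a getter
# (hypothetical attribute name "text_len"):
Token.set_extension("text_len", getter=lambda token: len(token.text))
print([(token.text, token._.text_len) for token in doc])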
| 22.6 | 58 | 0.769912 |
ca61afc03fb4605ab810701f6f211d0f3c6ad247
| 92 |
py
|
Python
|
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/10.0-Debugging.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/10.0-Debugging.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/10.0-Debugging.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
user_name = input("You name: ")
value = 1
new_string = value + user_name
print(new_string)
| 15.333333 | 31 | 0.717391 |
edb8b43d02230e34848138e4aa3e833df697e236
| 19,261 |
py
|
Python
|
Packs/Imperva_WAF/Integrations/ImpervaWAF/ImpervaWAF.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Imperva_WAF/Integrations/ImpervaWAF/ImpervaWAF.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Imperva_WAF/Integrations/ImpervaWAF/ImpervaWAF.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import traceback
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_CONTEXT_NAME = 'ImpervaWAF'
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
session_id = ''
def do_request(self, method, url_suffix, json_data=None):
if not self.session_id:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406), resp_type='response')
        # On 401 the session has expired: log in again and retry the request once
        if res.status_code == 401:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406),
resp_type='response')
if res.text:
res = res.json()
else:
res = {}
extract_errors(res)
return res
def login(self):
res = self._http_request('POST', 'SecureSphere/api/v1/auth/session', auth=self._auth)
extract_errors(res)
self.session_id = res.get('session-id')
def get_ip_group_entities(self, group_name, table_name):
raw_res = self.do_request('GET', f'conf/ipGroups/{group_name}')
entries = []
for entry in raw_res.get('entries'):
entries.append({'Type': entry.get('type'),
'IpAddressFrom': entry.get('ipAddressFrom'),
'IpAddressTo': entry.get('ipAddressTo'),
'NetworkAddress': entry.get('networkAddress'),
'CidrMask': entry.get('cidrMask')})
human_readable = tableToMarkdown(table_name, entries, removeNull=True,
headers=['Type', 'IpAddressFrom', 'IpAddressTo', 'NetworkAddress', 'CidrMask'])
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':
{'Name': group_name, 'Entries': entries}}
return human_readable, entry_context, raw_res
def get_custom_policy_outputs(self, policy_name, table_name):
raw_res = self.do_request('GET', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
policy = {'Name': policy_name,
'Enabled': raw_res.get('enabled'),
'OneAlertPerSession': raw_res.get('oneAlertPerSession'),
'DisplayResponsePage': raw_res.get('displayResponsePage'),
'Severity': raw_res.get('severity'),
'Action': raw_res.get('action'),
'FollowedAction': raw_res.get('followedAction'),
'ApplyTo': raw_res.get('applyTo'),
'MatchCriteria': raw_res.get('matchCriteria')}
hr_policy = policy.copy()
del hr_policy['MatchCriteria']
del hr_policy['ApplyTo']
human_readable = tableToMarkdown(table_name, hr_policy, removeNull=True)
if raw_res.get('applyTo'):
human_readable += '\n\n' + tableToMarkdown('Services to apply the policy to', raw_res.get('applyTo'),
removeNull=True)
for match in raw_res.get('matchCriteria', []):
tmp_match = match.copy()
operation = match['operation']
match_type = match['type']
# generate human readable for sourceIpAddresses type
if match_type == 'sourceIpAddresses':
if tmp_match.get('userDefined'):
for i, element in enumerate(tmp_match['userDefined']):
tmp_match['userDefined'][i] = {'IP Address': tmp_match['userDefined'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Source IP addresses:',
tmp_match['userDefined'], removeNull=True)
if tmp_match.get('ipGroups'):
for i, element in enumerate(tmp_match['ipGroups']):
tmp_match['ipGroups'][i] = {'Group name': tmp_match['ipGroups'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n IP Groups:',
tmp_match['ipGroups'], removeNull=True)
# generate human readable for sourceGeolocation type
elif match_type == 'sourceGeolocation':
if tmp_match.get('values'):
for i, element in enumerate(tmp_match['values']):
tmp_match['values'][i] = {'Country name': tmp_match['values'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Countries to match:',
tmp_match['values'], removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policy}
return human_readable, entry_context, raw_res
def extract_errors(res):
if not isinstance(res, list) and res.get('errors'):
error_message = ''
for err in res['errors']:
            error_message += f'error-code: {err.get("error-code")}, description: {err.get("description")}\n'
raise Exception(error_message)
def generate_policy_data_body(args):
severity = args.get('severity')
action = args.get('action')
followed_action = args.get('followed-action')
body = {}
if args.get('enabled'):
body['enabled'] = args['enabled'] == 'True'
if args.get('one-alert-per-session'):
body['oneAlertPerSession'] = args['one-alert-per-session'] == 'True'
if args.get('display-response-page'):
body['displayResponsePage'] = args['display-response-page'] == 'True'
if severity:
body['severity'] = severity
if action:
body['action'] = action
if followed_action:
body['followedAction'] = followed_action
return body
def generate_match_criteria(body, args):
geo_location_criteria_operation = args.get('geo-location-criteria-operation')
ip_addresses_criteria_operation = args.get('ip-addresses-criteria-operation')
ip_groups = args.get('ip-groups', '')
    ip_addresses = args.get('ip-addresses', '')
country_names = args.get('country-names', '')
match_criteria = []
if geo_location_criteria_operation:
if not country_names:
raise Exception('country-names argument is empty')
geo_location_match_item = {'type': 'sourceGeolocation',
'operation': geo_location_criteria_operation,
'values': country_names.split(',')}
match_criteria.append(geo_location_match_item)
if ip_addresses_criteria_operation:
        if not ip_groups and not ip_addresses:
raise Exception('ip-groups and ip-addresses arguments are empty, please fill at least one of them')
ip_addresses_match_item = {'type': 'sourceIpAddresses',
'operation': ip_addresses_criteria_operation}
if ip_groups:
ip_addresses_match_item['ipGroups'] = ip_groups.split(',')
        if ip_addresses:
            ip_addresses_match_item['userDefined'] = ip_addresses.split(',')
match_criteria.append(ip_addresses_match_item)
body['matchCriteria'] = match_criteria
return body
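# For illustration, a call such as generate_match_criteria({}, {'geo-location-criteria-operation': 'atLeastOne',
# 'country-names': 'France,Italy'}) yields a body like:
# {'matchCriteria': [{'type': 'sourceGeolocation', 'operation': 'atLeastOne', 'values': ['France', 'Italy']}]}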
def generate_ip_groups_entries(args):
entry_type = args.get('entry-type')
ip_from = args.get('ip-address-from')
ip_to = args.get('ip-address-to')
network_address = args.get('network-address')
cidr_mask = args.get('cidr-mask')
operation = args.get('operation')
json_entries = args.get('json-entries')
if not json_entries:
entry = {}
if entry_type == 'single':
entry['ipAddressFrom'] = ip_from
elif entry_type == 'range':
entry['ipAddressFrom'] = ip_from
entry['ipAddressTo'] = ip_to
elif entry_type == 'network':
entry['networkAddress'] = network_address
entry['cidrMask'] = cidr_mask
else:
raise Exception('entry-type argument is invalid')
entry['type'] = entry_type
entry['operation'] = operation
body = {'entries': [entry]}
else:
try:
json_entries = json.loads(json_entries)
except Exception:
            raise Exception(f'Failed to parse json-entries as JSON data, received object:\n{json_entries}')
body = {'entries': json_entries}
return body
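# For illustration, generate_ip_groups_entries({'entry-type': 'range', 'ip-address-from': '10.0.0.1',
# 'ip-address-to': '10.0.0.9', 'operation': 'add'}) returns a body like:
# {'entries': [{'ipAddressFrom': '10.0.0.1', 'ipAddressTo': '10.0.0.9', 'type': 'range', 'operation': 'add'}]}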
@logger
def test_module(client, args):
raw_res = client.do_request('GET', 'conf/sites')
if raw_res.get('sites'):
demisto.results('ok')
@logger
def ip_group_list_command(client, args):
raw_res = client.do_request('GET', 'conf/ipGroups')
groups = []
if raw_res.get('names'):
groups = raw_res['names']
for i, element in enumerate(groups):
groups[i] = {'Name': groups[i]}
human_readable = tableToMarkdown('IP groups', groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)': groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_list_entries_command(client, args):
group_name = args.get('ip-group-name')
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'IP group entries for {group_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_remove_entries_command(client, args):
group_name = args.get('ip-group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}/clear')
return_outputs(f'The IP group {group_name} is now empty', {}, raw_res)
@logger
def sites_list_command(client, args):
raw_res = client.do_request('GET', 'conf/sites')
sites = [{'Name': site} for site in raw_res.get('sites', [])]
human_readable = tableToMarkdown('All sites in the system', sites, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Site(val.Name===obj.Name)': sites}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_groups_list_command(client, args):
site = args.get('site-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}')
server_groups = []
if raw_res.get('server-groups'):
server_groups = raw_res['server-groups']
for i, element in enumerate(server_groups):
server_groups[i] = {'Name': server_groups[i], 'SiteName': site}
human_readable = tableToMarkdown(f'Server groups in {site}', server_groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.ServerGroup(val.Name===obj.Name)': server_groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_group_policies_list_command(client, args):
site = args.get('site-name')
server_group = args.get('server-group-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}/{server_group}/securityPolicies')
policies = []
for policy in raw_res:
policies.append({'System': policy.get('system'),
'PolicyName': policy.get('policy-name'),
'PolicyType': policy.get('policy-type'),
'ServerGroup': server_group,
'SiteName': site})
human_readable = tableToMarkdown(f'Policies for {server_group}', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.SecurityPolicy(val.PolicyName===obj.PolicyName)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def custom_policy_list_command(client, args):
raw_res = client.do_request('GET', 'conf/policies/security/webServiceCustomPolicies')
policies = []
if raw_res.get('customWebPolicies'):
policies = raw_res['customWebPolicies']
for i, element in enumerate(policies):
policies[i] = {'Name': policies[i]}
human_readable = tableToMarkdown('Custom web policies', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def get_custom_policy_command(client, args):
policy_name = args.get('policy-name')
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy data for {policy_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def create_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('POST', f'conf/ipGroups/{group_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('PUT', f'conf/ipGroups/{group_name}/data', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_ip_group_command(client, args):
group_name = args.get('group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}')
return_outputs(f'Group {group_name} deleted successfully', {}, raw_res)
@logger
def create_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply')
web_service = args.get('web-service-name-to-apply')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise Exception(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
body['applyTo'] = [{'siteName': site, 'serverGroupName': server_group, 'webServiceName': web_service}]
client.do_request('POST', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply', '')
web_service = args.get('web-service-name-to-apply', '')
apply_operation = args.get('apply-operation', '')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise DemistoException(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
if apply_operation:
body['applyTo'] = [{'operation': apply_operation, 'siteName': site, 'serverGroupName': server_group,
'webServiceName': web_service}]
client.do_request('PUT', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_custom_policy_command(client, args):
policy_name = args.get('policy-name')
raw_res = client.do_request('DELETE', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
return_outputs(f'Policy {policy_name} deleted successfully', {}, raw_res)
def main():
params = demisto.params()
# get the service API url
base_url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
credentials = params.get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command,
}
if command in commands:
commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 41.421505 | 120 | 0.643165 |
6128869bbbd1b6d663dea52577327d9fda3814e7
| 1,300 |
py
|
Python
|
Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/03_formen_zeichnen/03_formen_zeichnen.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/03_formen_zeichnen/03_formen_zeichnen.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/03_formen_zeichnen/03_formen_zeichnen.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
# 03_formen_zeichnen.py
# This exercise is about drawing shapes with the ImageDraw module
from PIL import Image, ImageDraw
import os
os.chdir(os.path.dirname(__file__))
target_file='.\\drawed_image.png'
if os.path.exists(target_file):
os.remove(target_file)
new_image=Image.new('RGBA', (200,200), 'white')
# Create the draw object
draw=ImageDraw.Draw(new_image)
# The "fill" and "outline" parameters are optional; if omitted, ImageDraw does not fill/outline the shape (the default is None)
# Draw points
point_coordinates=[(160,10),(160,30),(160,50),(160,70),(160,90)]
draw.point(point_coordinates, fill='black')
# Draw lines
line_coordinates=[(10,10),(10,60),(60,60)]
draw.line(line_coordinates, fill='black', width=5)
# Draw rectangles using a bounding-box tuple (left, top, right, bottom)
square_props=(100,100,150,150)
draw.rectangle(square_props, fill='red', outline='green')
# Draw ellipses using a bounding-box tuple
ellipse_props=(50,150,100,200)
draw.ellipse(ellipse_props, fill='blue', outline='magenta')
# Draw polygons
polygon_props=[(10,180), (30,170), (45,150), (25,145), (15,160)]
draw.polygon(polygon_props, fill='black')
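# Draw a fan of red diagonal lines from the left edge to the bottom edge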
for i in range(110, 200, 10):
line_coordinates=[(0,i),(i-100,200)]
draw.line(line_coordinates, fill='red', width=2)
new_image.save(target_file)
| 36.111111 | 118 | 0.75 |
b65c098c84b052f6ebd8bf731c4ac636571a7bc5
| 9,104 |
py
|
Python
|
feedcrawler/ombi.py
|
rix1337/FeedCrawler
|
6c104c39f1bd45e5d49c02c90bb8d9fd35f9709e
|
[
"MIT"
] | 16 |
2021-04-06T07:37:18.000Z
|
2022-03-27T15:05:32.000Z
|
feedcrawler/ombi.py
|
rix1337/FeedCrawler
|
6c104c39f1bd45e5d49c02c90bb8d9fd35f9709e
|
[
"MIT"
] | 17 |
2021-06-09T10:37:49.000Z
|
2022-03-31T07:29:36.000Z
|
feedcrawler/ombi.py
|
rix1337/FeedCrawler
|
6c104c39f1bd45e5d49c02c90bb8d9fd35f9709e
|
[
"MIT"
] | 3 |
2021-05-22T13:49:57.000Z
|
2022-01-05T11:15:47.000Z
|
# -*- coding: utf-8 -*-
# FeedCrawler
# Project by https://github.com/rix1337
import json
import requests
from imdb import IMDb
import feedcrawler.search.shared.content_all
import feedcrawler.search.shared.content_shows
from feedcrawler import internal
from feedcrawler.common import decode_base64
from feedcrawler.common import encode_base64
from feedcrawler.common import sanitize
from feedcrawler.config import CrawlerConfig
from feedcrawler.db import FeedDb
from feedcrawler.imdb import clean_imdb_id
def imdb_movie(imdb_id):
try:
imdb_id = clean_imdb_id(imdb_id)
ia = IMDb('https', languages='de-DE')
output = ia.get_movie(imdb_id)
title = sanitize(output.data['localized title'])
year = str(output.data['year'])
return title + " " + year
    except Exception:
if imdb_id is None:
internal.logger.debug("Ein Film ohne IMDb-ID wurde angefordert.")
else:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
return False
def imdb_show(imdb_id):
try:
imdb_id = clean_imdb_id(imdb_id)
ia = IMDb('https', languages='de-DE')
output = ia.get_movie(imdb_id)
ia.update(output, 'episodes')
title = sanitize(output.data['localized title'])
seasons = output.data['episodes']
eps = {}
for sn in seasons:
ep = []
for e in seasons[sn]:
ep.append(int(e))
eps[int(sn)] = ep
return title, eps
    except Exception:
if imdb_id is None:
internal.logger.debug("Eine Serie ohne IMDb-ID wurde angefordert.")
else:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
return False
def ombi(first_launch):
db = FeedDb('Ombi')
config = CrawlerConfig('Ombi')
url = config.get('url')
api = config.get('api')
if not url or not api:
return [0, 0]
english = CrawlerConfig('FeedCrawler').get('english')
try:
requested_movies = requests.get(url + '/api/v1/Request/movie', headers={'ApiKey': api})
requested_movies = json.loads(requested_movies.text)
requested_shows = requests.get(url + '/api/v1/Request/tv', headers={'ApiKey': api})
requested_shows = json.loads(requested_shows.text)
len_movies = len(requested_movies)
len_shows = len(requested_shows)
if first_launch:
internal.logger.debug("Erfolgreich mit Ombi verbunden.")
print(u"Erfolgreich mit Ombi verbunden.")
    except Exception:
internal.logger.debug("Ombi ist nicht erreichbar!")
print(u"Ombi ist nicht erreichbar!")
return [0, 0]
if requested_movies:
internal.logger.debug(
"Die Suchfunktion für Filme nutzt BY, FX, HW und NK, sofern deren Hostnamen gesetzt wurden.")
for r in requested_movies:
if bool(r.get("approved")):
if not bool(r.get("available")):
imdb_id = r.get("imdbId")
if not db.retrieve('movie_' + str(imdb_id)) == 'added':
title = imdb_movie(imdb_id)
if title:
best_result = feedcrawler.search.shared.content_all.get_best_result(title)
print(u"Film: " + title + u" durch Ombi hinzugefügt.")
if best_result:
feedcrawler.search.shared.content_all.download(best_result)
if english:
title = r.get('title')
best_result = feedcrawler.search.shared.content_all.get_best_result(title)
print(u"Film: " + title + u"durch Ombi hinzugefügt.")
if best_result:
feedcrawler.search.shared.content_all.download(best_result)
db.store('movie_' + str(imdb_id), 'added')
if requested_shows:
internal.logger.debug("Die Suchfunktion für Serien nutzt SJ, sofern der Hostname gesetzt wurde.")
for r in requested_shows:
imdb_id = r.get("imdbId")
child_requests = r.get("childRequests")
for cr in child_requests:
if bool(cr.get("approved")):
if not bool(cr.get("available")):
details = cr.get("seasonRequests")
for season in details:
sn = season.get("seasonNumber")
eps = []
episodes = season.get("episodes")
for episode in episodes:
if not bool(episode.get("available")):
enr = episode.get("episodeNumber")
s = str(sn)
if len(s) == 1:
s = "0" + s
s = "S" + s
e = str(enr)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
eps.append(enr)
if eps:
infos = imdb_show(imdb_id)
if infos:
title = infos[0]
all_eps = infos[1]
check_sn = False
if all_eps:
check_sn = all_eps.get(sn)
if check_sn:
sn_length = len(eps)
check_sn_length = len(check_sn)
if check_sn_length > sn_length:
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
payload = feedcrawler.search.shared.content_shows.get_best_result(title)
if payload:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + se)
added_episode = feedcrawler.search.shared.content_shows.download(
payload)
if not added_episode:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + s)
add_season = feedcrawler.search.shared.content_shows.download(
payload)
for e in eps:
e = str(e)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
db.store('show_' + str(imdb_id) + '_' + se, 'added')
if not add_season:
internal.logger.debug(
u"Konnte kein Release für " + title + " " + se + "finden.")
break
db.store('show_' + str(imdb_id) + '_' + se, 'added')
else:
payload = feedcrawler.search.shared.content_shows.get_best_result(title)
if payload:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + s)
feedcrawler.search.shared.content_shows.download(payload)
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
db.store('show_' + str(imdb_id) + '_' + se, 'added')
print(u"Serie/Staffel/Episode: " + title + u" durch Ombi hinzugefügt.")
return [len_movies, len_shows]
| 47.915789 | 119 | 0.426955 |
1ea25f96262b8f3b601a8f5a5914c8a331412ede
| 930 |
py
|
Python
|
Theories/DataStructures/QueueAndStack/StackDFS/BSTInorderTraversal/bst_inorder_traversal.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Theories/DataStructures/QueueAndStack/StackDFS/BSTInorderTraversal/bst_inorder_traversal.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Theories/DataStructures/QueueAndStack/StackDFS/BSTInorderTraversal/bst_inorder_traversal.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# Recursively
# def inorderTraversal(root: TreeNode) -> List[int]:
# traversal_list = []
#
# def traversal(cur_root: TreeNode):
# if cur_root:
# traversal(cur_root.left)
# traversal_list.append(cur_root.val)
# traversal(cur_root.right)
#
# traversal(root)
# return traversal_list
# Iterative
def inorderTraversal(root: TreeNode) -> List[int]:
traversal_list, stack = [], []
cur_root = root
while cur_root or stack:
while cur_root:
stack.append(cur_root)
cur_root = cur_root.left
cur_root = stack.pop()
traversal_list.append(cur_root.val)
cur_root = cur_root.right
return traversal_list
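# Minimal usage sketch (not part of the original solution file): build the
# tree 1 -> right 2 -> left 3 and verify the inorder result.
if __name__ == "__main__":
    root = TreeNode(1, right=TreeNode(2, left=TreeNode(3)))
    assert inorderTraversal(root) == [1, 3, 2]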
| 24.473684 | 53 | 0.622581 |
1eabcbd4a8bf4fe8a9167df6618cfb21090c2e90
| 136 |
py
|
Python
|
EC3/Thu.py
|
CSUpengyuyan/ECExperiment
|
4ea41837de421f18884be31248d57e88ea32b84b
|
[
"MIT"
] | null | null | null |
EC3/Thu.py
|
CSUpengyuyan/ECExperiment
|
4ea41837de421f18884be31248d57e88ea32b84b
|
[
"MIT"
] | null | null | null |
EC3/Thu.py
|
CSUpengyuyan/ECExperiment
|
4ea41837de421f18884be31248d57e88ea32b84b
|
[
"MIT"
] | null | null | null |
import thulac
with open('paper', 'r', encoding='UTF-8') as f:
    string = f.read()
t = thulac.thulac()
result = t.cut(string)
print(len(result),result)
| 17 | 50 | 0.683824 |
15846da1e5d4bf26623eefb4252273111caff529
| 1,311 |
py
|
Python
|
verto/processors/CommentPreprocessor.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 4 |
2017-04-10T06:09:54.000Z
|
2019-05-04T02:07:40.000Z
|
verto/processors/CommentPreprocessor.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 268 |
2017-04-03T20:40:46.000Z
|
2022-02-04T20:10:08.000Z
|
verto/processors/CommentPreprocessor.py
|
uccser/kordac
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 1 |
2019-01-07T15:46:31.000Z
|
2019-01-07T15:46:31.000Z
|
from markdown.preprocessors import Preprocessor
import re
class CommentPreprocessor(Preprocessor):
''' Searches a Document for comments (e.g. {comment example text here})
and removes them from the document.
'''
def __init__(self, ext, *args, **kwargs):
'''
Args:
ext: An instance of the Markdown parser class.
'''
super().__init__(*args, **kwargs)
self.processor = 'comment'
self.pattern = re.compile(ext.processor_info[self.processor]['pattern'])
def test(self, lines):
'''Return whether the provided document contains comments needing removal.
Args:
lines: A string of Markdown text.
Returns:
True if the document needs to be processed.
'''
return self.pattern.search(lines) is not None
def run(self, lines):
''' Removes all instances of text that match the following
example {comment example text here}. Inherited from
Preprocessor class.
Args:
lines: A list of lines of the Markdown document to be converted.
Returns:
Markdown document with comments removed.
'''
for i, line in enumerate(lines):
lines[i] = re.sub(self.pattern, '', line)
return lines
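# Illustrative example (the concrete regex comes from the extension's
# processor_info configuration): with a pattern matching '{comment ...}',
# run(['Before {comment note} after']) would return ['Before  after'].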
| 30.488372 | 82 | 0.611747 |
01ea323a6f50daf34073d0b73b233c52d48d26a6
| 295 |
py
|
Python
|
exercises/en/exc_02_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/en/exc_02_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/en/exc_02_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.en import English
nlp = English()
doc = nlp("David Bowie is a PERSON")
# Look up the hash for the string label "PERSON"
person_hash = ____.____.____[____]
print(person_hash)
# Look up the person_hash to get the string
person_string = ____.____.____[____]
print(person_string)
| 22.692308 | 48 | 0.755932 |
01f4a18493bfb22d2c1901b879b8cf20d9f32f0f
| 6,792 |
py
|
Python
|
bridges/east_open_cv_bridge.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
bridges/east_open_cv_bridge.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | 16 |
2020-01-28T23:04:13.000Z
|
2022-03-12T00:02:40.000Z
|
bridges/east_open_cv_bridge.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
"""
This file contains source code from another GitHub project. The comments made there apply. The source code
was licensed under the MIT License. The license text and a detailed reference can be found in the license
subfolder at models/east_open_cv/license. Many thanks to the author of the code.
For reasons of clarity unneeded parts of the original code were not taken over. The original project can
be found on the https://github.com/ZER-0-NE/EAST-Detector-for-text-detection-using-OpenCV page.
For a better understanding the documentation has been supplemented in parts. Code completely or predominantly
taken from the source was marked with "External code".
"""
import time
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression
import bridges_config as config
class EastOpenCvBridge:
"""A bridge class for connecting to a text detector
"""
def __init__(self):
"""The constructor
"""
self.load_model()
def load_model(self):
"""Loads the underlying model together with its pre-trained weights.
"""
try:
self.model = cv2.dnn.readNet(config.EAST_OPENCV_MODEL_PATH)
        except Exception:
print('Error in method {0} in module {1}'.format('load_model', 'east_open_cv_bridge.py'))
def scann(self, image):
"""External code (add try...except and an extension)
Examines the passed image for text regions and returns them as a collection of boxes in the
form of a NumPy array. The passed image must be a raster image.
:param image:The image to be examined.
:return:A NumPy array of predicted text areas.
"""
try:
# load the input image and grab the image dimensions
self.orig = image.copy()
(H, W) = image.shape[:2]
# set the new width and height and then determine the ratio in change
# for both the width and height, should be multiple of 32
(newW, newH) = (320, 320)
rW = W / float(newW)
rH = H / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
self.layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
self.model.setInput(blob)
(scores, geometry) = self.model.forward(self.layerNames)
end = time.time()
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = [] # stores the bounding box coordiantes for text regions
confidences = [] # stores the probability associated with each bounding box region in rects
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the geometrical
# data used to derive potential bounding box coordinates that
# surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability, ignore it
if scoresData[x] < 0.5:
continue
# compute the offset factor as our resulting feature maps will
# be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and then
# compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height of
# the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates for
# the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score to
# our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
"""
Extension to the original code to return a usable format.
"""
newboxes = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
box = []
box.append([startX, startY])
box.append([endX, startY])
box.append([endX, endY])
box.append([startX, endY])
newboxes.append(box)
return np.asarray(newboxes)
        except Exception:
print('Error in method {0} in module {1}'.format('scann', 'east_open_cv_bridge.py'))
return None
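# Minimal usage sketch (assumes a readable raster image at the hypothetical
# path 'sample.jpg' and a valid model path in bridges_config):
#   bridge = EastOpenCvBridge()
#   boxes = bridge.scann(cv2.imread('sample.jpg'))
#   # boxes is an (N, 4, 2) array of quadrilateral corners, or None on error.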
| 42.716981 | 110 | 0.552415 |
1742d077bb905d8522808434ce28bc222558d79b
| 50 |
py
|
Python
|
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
print("db package (Mapper) wird initialisiert...")
| 50 | 50 | 0.74 |
176fa23117b8291f663bab8e7d082dea0bbac0f6
| 3,287 |
py
|
Python
|
Chapter7_CNN/Chapter7_3_CNN_Optimization/mnistData.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter7_CNN/Chapter7_3_CNN_Optimization/mnistData.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter7_CNN/Chapter7_3_CNN_Optimization/mnistData.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
class MNIST:
def __init__(self, with_normalization: bool = True) -> None:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x_train_: np.ndarray = None
self.y_train_: np.ndarray = None
self.x_val_: np.ndarray = None
self.y_val_: np.ndarray = None
self.val_size = 0
self.train_splitted_size = 0
# Preprocess x data
self.x_train = x_train.astype(np.float32)
        self.x_train = np.expand_dims(self.x_train, axis=-1)
if with_normalization:
self.x_train = self.x_train / 255.0
self.x_test = x_test.astype(np.float32)
        self.x_test = np.expand_dims(self.x_test, axis=-1)
if with_normalization:
self.x_test = self.x_test / 255.0
# Dataset attributes
self.train_size = self.x_train.shape[0]
self.test_size = self.x_test.shape[0]
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.img_shape = (self.width, self.height, self.depth)
self.num_classes = 10
# Preprocess y data
self.y_train = to_categorical(y_train, num_classes=self.num_classes)
self.y_test = to_categorical(y_test, num_classes=self.num_classes)
def get_train_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_train, self.y_train
def get_test_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_test, self.y_test
def get_splitted_train_validation_set(self, validation_size: float = 0.33) -> tuple:
self.x_train_, self.x_val_, self.y_train_, self.y_val_ = train_test_split(
self.x_train,
self.y_train,
test_size=validation_size
)
self.val_size = self.x_val_.shape[0]
self.train_splitted_size = self.x_train_.shape[0]
return self.x_train_, self.x_val_, self.y_train_, self.y_val_
def data_augmentation(self, augment_size: int = 5_000) -> None:
image_generator = ImageDataGenerator(
rotation_range=5,
zoom_range=0.08,
width_shift_range=0.08,
height_shift_range=0.08
)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(
x_augmented,
np.zeros(augment_size),
batch_size=augment_size,
shuffle=False
).next()[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
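# Minimal usage sketch (assumption, mirroring the methods defined above):
#   data = MNIST(with_normalization=True)
#   x_train, y_train = data.get_train_set()
#   data.data_augmentation(augment_size=5_000)  # appends 5,000 augmented images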
| 41.607595 | 89 | 0.639793 |
da0671570ab644de2ce4f71e6570e6da551814ad
| 651 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch03_recursion/ex09_pascal_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch03_recursion/ex09_pascal_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch03_recursion/ex09_pascal_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch03_recursion.solutions.ex09_pascal_triangle import calc_pascal_with_action
@pytest.mark.parametrize("n, expected",
[(1, [1]),
(2, [1, 1]),
(3, [1, 2, 1]),
(4, [1, 3, 3, 1]),
(5, [1, 4, 6, 4, 1]),
(6, [1, 5, 10, 10, 5, 1]),
(7, [1, 6, 15, 20, 15, 6, 1])])
def test_calc_pascal_with_action(n, expected):
assert calc_pascal_with_action(n, None) == expected
| 32.55 | 81 | 0.473118 |
e50927dcc0a07e35b56a084b8f3a7711877da2df
| 709 |
py
|
Python
|
user/managers.py
|
TheKiddos/StaRat
|
33807d73276563f636b430e1bbfcb65b645869f7
|
[
"MIT"
] | 1 |
2021-05-18T16:33:10.000Z
|
2021-05-18T16:33:10.000Z
|
user/managers.py
|
TheKiddos/StaRat
|
33807d73276563f636b430e1bbfcb65b645869f7
|
[
"MIT"
] | 3 |
2021-05-18T16:02:32.000Z
|
2021-05-21T15:20:12.000Z
|
user/managers.py
|
TheKiddos/StaRat
|
33807d73276563f636b430e1bbfcb65b645869f7
|
[
"MIT"
] | 1 |
2021-09-12T22:56:09.000Z
|
2021-09-12T22:56:09.000Z
|
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **kwargs):
"""Creates and saves a new user"""
if not email:
raise ValueError("User must have an email address")
user = self.model(email=self.normalize_email(email), **kwargs)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
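# Minimal usage sketch (assumes a custom user model wired up with
# `objects = UserManager()`):
#   user = User.objects.create_user(email='a@example.com', password='secret')
#   admin = User.objects.create_superuser(email='root@example.com', password='secret')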
| 27.269231 | 70 | 0.64598 |
97416b0e9958cd86fc9e9dfc6f8bd4b6ecd45671
| 1,545 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch03_recursion/solutions/ex02_digits.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch03_recursion/solutions/ex02_digits.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch03_recursion/solutions/ex02_digits.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
def count_digits(value):
if value < 0:
raise ValueError("value must be >= 0")
    # recursive base case
if value < 10:
return 1
    # recursive descent
return count_digits(value // 10) + 1
def count_digits_shorter(value):
return sum([1 for _ in str(value)])
def count_digits_tricky(value):
return len(str(value))
def calc_sum_of_digits(value):
if value < 0:
raise ValueError("value must be >= 0")
    # recursive base case
if value < 10:
return value
remainder = value // 10
last_digit = value % 10
    # recursive descent
return calc_sum_of_digits(remainder) + last_digit
def calc_sum_of_digits_divmod(value):
if value < 0:
raise ValueError("value must be >= 0")
    # recursive base case
if value < 10:
return value
remainder, last_digit = divmod(value, 10)
    # recursive descent
return calc_sum_of_digits(remainder) + last_digit
def calc_sum_of_digits_shorter(value):
return sum([int(ch) for ch in str(value)])
def main():
print(count_digits(72))
print(count_digits(7271))
    print(count_digits_shorter(72))
    print(count_digits_shorter(7271))
print(calc_sum_of_digits(72))
print(calc_sum_of_digits(7271))
print(calc_sum_of_digits(123456))
print(calc_sum_of_digits_shorter(72))
print(calc_sum_of_digits_shorter(7271))
print(calc_sum_of_digits_shorter(123456))
if __name__ == "__main__":
main()
| 19.807692 | 53 | 0.673786 |
a9ef68c84e2f76bdbda7bbe30d25d26e26003e2e
| 3,932 |
py
|
Python
|
python/fleetx/dataset/ctr_data_generator.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 170 |
2020-08-12T12:07:01.000Z
|
2022-03-07T02:38:26.000Z
|
python/fleetx/dataset/ctr_data_generator.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 195 |
2020-08-13T03:22:15.000Z
|
2022-03-30T07:40:25.000Z
|
python/fleetx/dataset/ctr_data_generator.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 67 |
2020-08-14T02:07:46.000Z
|
2022-03-28T10:05:33.000Z
|
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are 13 integer features and 26 categorical features
import os
import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
continous_features = range(1, 14)
categorial_features = range(14, 40)
continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
def get_dataloader(inputs,
train_files_path,
sparse_feature_dim,
batch_size,
shuffle=True):
file_list = [
str(train_files_path) + "/%s" % x for x in os.listdir(train_files_path)
]
loader = fluid.io.DataLoader.from_generator(
feed_list=inputs, capacity=64, use_double_buffer=True, iterable=True)
train_generator = CriteoDataset(sparse_feature_dim)
reader = train_generator.train(file_list,
fleet.worker_num(), fleet.worker_index())
if shuffle:
reader = paddle.batch(
paddle.reader.shuffle(
reader, buf_size=batch_size * 100),
batch_size=batch_size)
else:
reader = paddle.batch(reader, batch_size=batch_size)
places = fluid.CPUPlace()
loader.set_sample_list_generator(reader, places)
return loader
class CriteoDataset(object):
def __init__(self, sparse_feature_dim):
self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.cont_max_ = [
20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.cont_diff_ = [
20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.hash_dim_ = sparse_feature_dim
# here, training data are lines with line_index < train_idx_
self.train_idx_ = 41256555
self.continuous_range_ = range(1, 14)
self.categorical_range_ = range(14, 40)
def _reader_creator(self, file_list, is_train, trainer_num, trainer_id):
def reader():
for file in file_list:
with open(file, 'r') as f:
line_idx = 0
for line in f:
line_idx += 1
features = line.rstrip('\n').split('\t')
dense_feature = []
sparse_feature = []
for idx in self.continuous_range_:
if features[idx] == '':
dense_feature.append(0.0)
else:
dense_feature.append(
(float(features[idx]) -
self.cont_min_[idx - 1]) /
self.cont_diff_[idx - 1])
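                        # Hashing trick: bucket each categorical value into a
                        # fixed-size id space instead of keeping a vocabulary.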
for idx in self.categorical_range_:
sparse_feature.append([
hash(str(idx) + features[idx]) % self.hash_dim_
])
label = [int(features[0])]
yield [dense_feature] + sparse_feature + [label]
return reader
def train(self, file_list, trainer_num, trainer_id):
return self._reader_creator(file_list, True, trainer_num, trainer_id)
def test(self, file_list):
return self._reader_creator(file_list, False, 1, 0)
| 38.930693 | 79 | 0.56943 |
ec56638e34e686b038f1d4f5c918467dd5d6ec30
| 1,654 |
py
|
Python
|
7-assets/past-student-repos/_Individual-Projects/Computer-Architecture-Notes-master/lectureII/beejMachine.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-03-19T19:25:39.000Z
|
2021-03-19T19:25:39.000Z
|
7-assets/past-student-repos/_Individual-Projects/Computer-Architecture-Notes-master/lectureII/beejMachine.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/_Individual-Projects/Computer-Architecture-Notes-master/lectureII/beejMachine.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
import sys
PRINT_BEEJ = 1
HALT = 2
PRINT_NUM = 3
SAVE = 4
PRINT_REGISTER = 5
ADD = 6
'''
SAVE takes 2 arguments
saves value in [ARG1] to register [ARG2]
'''
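# A hypothetical program file for this machine (one decimal value per line,
# '#' comments are stripped by load_memory):
#   4   # SAVE
#   65  #   value 65
#   2   #   into register 2
#   5   # PRINT_REGISTER
#   2   #   register 2
#   2   # HALT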
register = [0] * 8
memory = [0] * 128 # 128 bytes of RAM
def load_memory(filename):
try:
address = 0
with open(filename) as f:
for line in f:
# Split before and after any comment symbols
comment_split = line.split("#")
num = comment_split[0].strip()
# Ignore blanks
if num == "":
continue
value = int(num)
memory[address] = value
address += 1
except FileNotFoundError:
print(f"{sys.argv[0]}: {sys.argv[1]} not found")
sys.exit(2)
if len(sys.argv) != 2:
print("usage: simple.py <filename>", file=sys.stderr)
sys.exit(1)
filepath = sys.argv[1]
load_memory(filepath)
pc = 0
running = True
while running:
command = memory[pc]
if command == PRINT_BEEJ:
print("Beej!")
pc += 1
elif command == PRINT_NUM:
num = memory[pc + 1]
print(num)
pc += 2
elif command == SAVE:
num = memory[pc + 1]
reg = memory[pc + 2]
register[reg] = num
pc += 3
elif command == PRINT_REGISTER:
reg = memory[pc + 1]
print(register[reg])
pc += 2
elif command == ADD:
reg_a = memory[pc + 1]
reg_b = memory[pc + 2]
register[reg_a] += register[reg_b]
pc += 3
elif command == HALT:
running = False
pc += 1
else:
print(f"Unknown instruction: {command}")
sys.exit(1)
| 17.784946 | 60 | 0.527207 |
6bde5c3cf5dfee5fc50013e7c4782f365d39d8c6
| 2,917 |
py
|
Python
|
python/gdal_cookbook/cookbook_geometry/calculate_in_geometry.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/calculate_in_geometry.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/calculate_in_geometry.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from osgeo import ogr
"""
Calculate Envelope of a Geometry
"""
wkt = "LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)"
geom = ogr.CreateGeometryFromWkt(wkt)
# GetEnvelope returns a tuple (minX, maxX, minY, maxY)
env = geom.GetEnvelope()
print(f'minX:{env[0]}, minY:{env[2]}, maxX:{env[1]}, maxY:{env[3]}')
"""
Calculate the Area of a Geometry
"""
wkt = "POLYGON ((1162440.5712740074 672081.4332727483, 1162440.5712740074 647105.5431482664, 1195279.2416228633 647105.5431482664, 1195279.2416228633 672081.4332727483, 1162440.5712740074 672081.4332727483))"
poly = ogr.CreateGeometryFromWkt(wkt)
print(f'Area = {poly.GetArea()}')
"""
Calculate the Length of a Geometry
"""
wkt = "LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)"
geom = ogr.CreateGeometryFromWkt(wkt)
print(f'Length = {geom.Length()}')
"""
Get the geometry type (as a string) from a Geometry
"""
wkts = [
"POINT (1198054.34 648493.09)",
"LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)",
"POLYGON ((1162440.5712740074 672081.4332727483, 1162440.5712740074 647105.5431482664, 1195279.2416228633 647105.5431482664, 1195279.2416228633 672081.4332727483, 1162440.5712740074 672081.4332727483))"
]
for wkt in wkts:
geom = ogr.CreateGeometryFromWkt(wkt)
print(geom.GetGeometryName())
"""
Calculate intersection between two Geometries
"""
wkt1 = "POLYGON ((1208064.271243039 624154.6783778917, 1208064.271243039 601260.9785661874, 1231345.9998651114 601260.9785661874, 1231345.9998651114 624154.6783778917, 1208064.271243039 624154.6783778917))"
wkt2 = "POLYGON ((1199915.6662253144 633079.3410163528, 1199915.6662253144 614453.958118695, 1219317.1067437078 614453.958118695, 1219317.1067437078 633079.3410163528, 1199915.6662253144 633079.3410163528)))"
poly1 = ogr.CreateGeometryFromWkt(wkt1)
poly2 = ogr.CreateGeometryFromWkt(wkt2)
intersection = poly1.Intersection(poly2)
print(intersection.ExportToWkt())
"""
Calculate union between two Geometries
"""
wkt1 = "POLYGON ((1208064.271243039 624154.6783778917, 1208064.271243039 601260.9785661874, 1231345.9998651114 601260.9785661874, 1231345.9998651114 624154.6783778917, 1208064.271243039 624154.6783778917))"
wkt2 = "POLYGON ((1199915.6662253144 633079.3410163528, 1199915.6662253144 614453.958118695, 1219317.1067437078 614453.958118695, 1219317.1067437078 633079.3410163528, 1199915.6662253144 633079.3410163528)))"
poly1 = ogr.CreateGeometryFromWkt(wkt1)
poly2 = ogr.CreateGeometryFromWkt(wkt2)
union = poly1.Union(poly2)
print(f'poly1: {poly1}')
print(f'poly2: {poly2}')
print(f'union: {union.ExportToWkt()}')
| 41.671429 | 208 | 0.782996 |
2e1cd67fa9c46a1e222aea71efa3b999151f6255
| 6,514 |
py
|
Python
|
test/test_npu/test_ne.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_ne.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_ne.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNe(TestCase):
def cpu_op_exec_scalar(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec_scalar(self,input1, other):
output = torch.ne(input1, other)
output1 = output.to("cpu")
output2 = output1.numpy()
return output2
def cpu_op_exec(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec(self,input1, other):
output = torch.ne(input1, other)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_scalar_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_scalar_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def test_ne_scalar_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2,4, 3)], 3],
[[np.float32, 3, (2, 3)], 2],
[[np.float32, 0, (3, 2)], 8],
[[np.int8, 0 , (4, 3)],3],
[[np.uint8, -1, (2,4, 3)],3],
[[np.int32, 0, (2, 6)],6]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_output = self.cpu_op_exec_scalar(cpu_input1, item[1])
npu_output = self.npu_op_exec_scalar(npu_input1, item[1])
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)]],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_scalar_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], 2, [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], 3, [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], 4, [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], 5, [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], 6, [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], 7, [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_scalar_out(cpu_input1, item[1], cpu_out)
npu_output = self.npu_op_exec_scalar_out(npu_input1, item[1], npu_out)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)], [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)], [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)], [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)], [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)], [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)], [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_out(cpu_input1, cpu_input2, cpu_out)
npu_output = self.npu_op_exec_out(npu_input1, npu_input2, npu_out)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestNe, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 40.459627 | 99 | 0.562327 |
d89d003bb7f2ea99bc1e7bd32b9f2292f5f699ed
| 402 |
py
|
Python
|
Properties/analysis/complexity metrics/complexity/PMSmetrics.py
|
NazaninBayati/SCA
|
74e670462dd0da5e24147aab86df393b38405176
|
[
"MIT"
] | null | null | null |
Properties/analysis/complexity metrics/complexity/PMSmetrics.py
|
NazaninBayati/SCA
|
74e670462dd0da5e24147aab86df393b38405176
|
[
"MIT"
] | null | null | null |
Properties/analysis/complexity metrics/complexity/PMSmetrics.py
|
NazaninBayati/SCA
|
74e670462dd0da5e24147aab86df393b38405176
|
[
"MIT"
] | null | null | null |
db = open("Project Metrics Summary.txt","r")
db = db.read()
db_st = db.split("\n")
#print(db_st.__len__())
print(db_st[0])
#print(db.split("\n"))
db_st = db_st[2:len(db_st) - 1]
#print(db_st)
with open('Project Metrics Summary Report.txt', 'w') as filehandle:
for listitem in db_st:
filehandle.write('%s\n' % listitem)
#print(db_st)
| 20.1 | 67 | 0.659204 |
d8cc710dba9b48a6f8d274fb7baf7678071cc322
| 2,745 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-12T06:52:43.000Z
|
2022-01-12T06:52:43.000Z
|
Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | null | null | null |
Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle_crf as crf
import paddle.nn.functional as F
class JointModel(paddle.nn.Layer):
def __init__(self, vocab_size, embedding_size, hidden_size, num_intents, num_slots, num_layers=1, drop_p=0.1):
super(JointModel, self).__init__()
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.drop_p = drop_p
self.num_intents = num_intents
self.num_slots = num_slots
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.dropout = nn.Dropout(p=drop_p)
self.layer_norm = nn.LayerNorm(2*hidden_size)
self.bilstm = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, direction="bidirectional", num_layers=num_layers, dropout=drop_p)
self.ner_classifier = nn.Linear(hidden_size*2, num_slots+2)
self.intent_classifier = nn.Linear(hidden_size*2, num_intents)
self.crf = crf.LinearChainCrf(num_slots, crf_lr=0.001, with_start_stop_tag=True)
self.crf_loss = crf.LinearChainCrfLoss(self.crf)
self.viterbi_decoder = crf.ViterbiDecoder(self.crf.transitions)
def forward(self, inputs, lens):
batch_size, seq_len = inputs.shape
inputs_embedding = self.embedding(inputs)
if self.drop_p:
inputs_embedding = self.dropout(inputs_embedding)
lstm_outputs, _ = self.bilstm(inputs_embedding)
lstm_outputs = self.layer_norm(lstm_outputs)
emissions = self.ner_classifier(lstm_outputs)
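        # Gather the hidden state of the last valid (length-1) time step of
        # each sequence as the utterance representation for intent prediction.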
indices = paddle.stack([paddle.arange(batch_size), lens-1], axis=1)
last_step_hiddens = paddle.gather_nd(lstm_outputs, indices)
intent_logits = self.intent_classifier(last_step_hiddens)
return emissions, intent_logits
def get_slot_loss(self, features, lens, tags):
slot_loss = self.crf_loss(features, lens, tags)
slot_loss = paddle.mean(slot_loss)
return slot_loss
def get_intent_loss(self, intent_logits, intent_labels):
return F.cross_entropy(intent_logits, intent_labels)
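# Usage sketch (added for illustration — the sizes below are made up, and the
# local `paddle_crf` module must be importable for construction to succeed):
if __name__ == "__main__":
    model = JointModel(vocab_size=1000, embedding_size=64, hidden_size=128,
                       num_intents=5, num_slots=10)
    tokens = paddle.randint(0, 1000, shape=[2, 16])  # batch of 2 padded token-id sequences
    lengths = paddle.to_tensor([16, 12])             # true lengths before padding
    emissions, intent_logits = model(tokens, lengths)
    print(emissions.shape, intent_logits.shape)      # [2, 16, 12] and [2, 5]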
| 38.125 | 147 | 0.718033 |
2b71197dcae926c71c551b473549219aad6d1372
| 1,047 |
py
|
Python
|
Kapitel_1/_E1_int_string_adder.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | 1 |
2020-12-24T15:42:54.000Z
|
2020-12-24T15:42:54.000Z
|
Kapitel_1/_E1_int_string_adder.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
Kapitel_1/_E1_int_string_adder.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
# --- This class demonstrates that the 'other' argument of the dunder methods can be anything ---#
class IntStringAdder(int):
def __init__(self, number):
self.number = number
def __add__(self, other):
if isinstance(other, str):
try:
x = int(other)
            except ValueError:
raise ValueError(f"String Value >{other}< cannot be converted to 'int'.")
else:
raise TypeError("Wrong datatype, expected a 'str' as 2nd operand.")
return IntStringAdder(self.number + x)
def __str__(self):
return f"My Value is {self.number}"
# --- Instantiating the class via its constructor --- #
my_number = IntStringAdder(10)
# --- Addition using explicit syntax and an implicit method call --- #
# --- The return value is a new instance of the class --- #
my_new_number = my_number + '15'
print(my_new_number)
# --- Raises an error, because the str value 'Simon' cannot be converted to an integer --- #
my_new_number = my_number + 'Simon'
| 33.774194 | 106 | 0.642789 |
515e4d71ccc6bd2511e5a622dda3504f52bb9362
| 4,902 |
py
|
Python
|
packages/watchmen-rest-doll/src/watchmen_rest_doll/system/external_writer_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-doll/src/watchmen_rest_doll/system/external_writer_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-doll/src/watchmen_rest_doll/system/external_writer_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from fastapi import APIRouter, Body, Depends
from watchmen_auth import PrincipalService
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService
from watchmen_model.admin import UserRole
from watchmen_model.common import DataPage, ExternalWriterId, Pageable
from watchmen_model.system import ExternalWriter
from watchmen_rest import get_any_admin_principal, get_super_admin_principal
from watchmen_rest.util import raise_400, raise_403, raise_404
from watchmen_rest_doll.doll import ask_tuple_delete_enabled
from watchmen_rest_doll.util import trans, trans_readonly
from watchmen_utilities import is_blank
router = APIRouter()
def get_external_writer_service(principal_service: PrincipalService) -> ExternalWriterService:
return ExternalWriterService(ask_meta_storage(), ask_snowflake_generator(), principal_service)
@router.get('/external_writer', tags=[UserRole.ADMIN, UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def load_external_writer_by_id(
writer_id: Optional[ExternalWriterId] = None,
principal_service: PrincipalService = Depends(get_any_admin_principal)
) -> ExternalWriter:
if is_blank(writer_id):
raise_400('External writer id is required.')
if not principal_service.is_super_admin():
if writer_id != principal_service.get_tenant_id():
raise_403()
external_writer_service = get_external_writer_service(principal_service)
def action() -> ExternalWriter:
# noinspection PyTypeChecker
external_writer: ExternalWriter = external_writer_service.find_by_id(writer_id)
if external_writer is None:
raise_404()
return external_writer
return trans_readonly(external_writer_service, action)
@router.post('/external_writer', tags=[UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def save_external_writer(
external_writer: ExternalWriter, principal_service: PrincipalService = Depends(get_super_admin_principal)
) -> ExternalWriter:
external_writer_service = get_external_writer_service(principal_service)
# noinspection DuplicatedCode
def action(writer: ExternalWriter) -> ExternalWriter:
if external_writer_service.is_storable_id_faked(writer.writerId):
external_writer_service.redress_storable_id(writer)
# noinspection PyTypeChecker
writer: ExternalWriter = external_writer_service.create(writer)
else:
# noinspection PyTypeChecker
writer: ExternalWriter = external_writer_service.update(writer)
return writer
return trans(external_writer_service, lambda: action(external_writer))
class QueryExternalWriterDataPage(DataPage):
data: List[ExternalWriter]
@router.post(
'/external_writer/name', tags=[UserRole.ADMIN, UserRole.SUPER_ADMIN], response_model=QueryExternalWriterDataPage)
async def find_external_writers_by_name(
query_name: Optional[str] = None, pageable: Pageable = Body(...),
principal_service: PrincipalService = Depends(get_any_admin_principal)
) -> QueryExternalWriterDataPage:
external_writer_service = get_external_writer_service(principal_service)
# noinspection DuplicatedCode
def action() -> QueryExternalWriterDataPage:
tenant_id = None
if principal_service.is_tenant_admin():
tenant_id = principal_service.get_tenant_id()
if is_blank(query_name):
# noinspection PyTypeChecker
return external_writer_service.find_by_text(None, tenant_id, pageable)
else:
# noinspection PyTypeChecker
return external_writer_service.find_by_text(query_name, tenant_id, pageable)
return trans_readonly(external_writer_service, action)
@router.get(
"/external_writer/all", tags=[UserRole.ADMIN], response_model=List[ExternalWriter])
async def find_all_external_writers(
principal_service: PrincipalService = Depends(get_any_admin_principal)) -> List[ExternalWriter]:
tenant_id = None
if principal_service.is_tenant_admin():
tenant_id = principal_service.get_tenant_id()
external_writer_service = get_external_writer_service(principal_service)
def action() -> List[ExternalWriter]:
return external_writer_service.find_all(tenant_id)
return trans_readonly(external_writer_service, action)
@router.delete('/external_writer', tags=[UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def delete_external_writer_by_id(
writer_id: Optional[ExternalWriterId] = None,
principal_service: PrincipalService = Depends(get_super_admin_principal)
) -> ExternalWriter:
if not ask_tuple_delete_enabled():
raise_404('Not Found')
if is_blank(writer_id):
raise_400('External writer id is required.')
external_writer_service = get_external_writer_service(principal_service)
def action() -> ExternalWriter:
# noinspection PyTypeChecker
external_writer: ExternalWriter = external_writer_service.delete(writer_id)
if external_writer is None:
raise_404()
return external_writer
return trans(external_writer_service, action)
| 37.136364 | 114 | 0.825989 |
850899eec319e2500a48436535153540cac42d7e
| 1,191 |
py
|
Python
|
exercises/python/data-types/basic/nested-list.py
|
rogeriosantosf/hacker-rank-profile
|
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
|
[
"MIT"
] | null | null | null |
exercises/python/data-types/basic/nested-list.py
|
rogeriosantosf/hacker-rank-profile
|
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
|
[
"MIT"
] | null | null | null |
exercises/python/data-types/basic/nested-list.py
|
rogeriosantosf/hacker-rank-profile
|
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
|
[
"MIT"
] | null | null | null |
# Given the names and grades for each student in a class of students,
# store them in a nested list and print the name(s) of any student(s)
# having the second lowest grade.
# Note: If there are multiple students with the second lowest grade,
# order their names alphabetically and print each name on a new line.
# Sample Input:
# 3
# Harry
# 37.21
# Berry
# 37.21
# Tina
# 37.2
# Sample Output:
# Berry
# Harry
if __name__ == '__main__':
students = []
for _ in range(int(input())):
name = input()
score = float(input())
students.append([name, score])
students = sorted(students, key=lambda student: student[1])
lowest_grade = students[0][1]
second_lowest_grade = None
second_lowest_names = []
for i in range(len(students)):
if students[i][1] > lowest_grade:
if second_lowest_grade == None:
second_lowest_grade = students[i][1]
second_lowest_names.append(students[i][0])
elif students[i][1] == second_lowest_grade:
second_lowest_names.append(students[i][0])
second_lowest_names.sort()
for name in second_lowest_names:
print(name)
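# Note (added): an equivalent shortcut is to de-duplicate the scores first —
# e.g. second_lowest = sorted({score for _, score in students})[1] — and then
# print the sorted names of the students holding that grade.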
| 25.891304 | 71 | 0.641478 |
5ccb76dd9da3cb156598bffc9e3ef0a2861567f8
| 3,544 |
py
|
Python
|
resources/mechanics_lib/Fulcrum.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 7 |
2016-01-20T02:33:00.000Z
|
2021-02-04T04:06:57.000Z
|
resources/mechanics_lib/Fulcrum.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | null | null | null |
resources/mechanics_lib/Fulcrum.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 3 |
2016-10-05T07:20:30.000Z
|
2017-11-20T10:36:50.000Z
|
from api.component import Component
class Fulcrum(Component):
def defComponents(self):
# Subcomponents used in this assembly
self.addSubcomponent("stem", "Hinge")
self.addSubcomponent("left", "RectBeam")
self.addSubcomponent("right", "RectBeam")
self.addSubcomponent("t", "TJoint")
def defParameters(self):
# Subcomponent free parameters are inherited by default
# Subcomponent parameters that are no longer free in this assembly are deleted
'''
self.delParameter("length")
self.delParameter("width")
self.delParameter("depth")
self.delParameter("angle")
self.delParameter("rangle")
self.delParameter("langle")
self.delParameter("phase")
self.delParameter("noflap")
self.delParameter("faces")
'''
# New free parameters specific to this assembly are added
self.newParameter("leftlength")
self.newParameter("rightlength")
self.newParameter("stemwidth")
self.newParameter("crosswidth")
self.newParameter("thickness")
def defInterfaces(self):
# Locations on FixedLegs component that higher order components can use for assembly
self.newInterface("stemedge")
self.newInterface("leftedge")
self.newInterface("rightedge")
self.newInterface("lefttab")
def defConstraints(self):
### Set specific relationships between parameters
self.addConstraint(("stem", "perimeter"), ("stemwidth", "thickness"), "2 * sum(x)")
self.addConstraint(("stem", "top"), ("stemwidth", "thickness"), "(x[1]-x[0]) * 1.0 / sum(x)")
self.addConstraint(("stem", "bot"), ("stemwidth", "thickness"), "(x[1]-x[0]) * 1.0 / sum(x)")
self.addConstraint(("left", "depth"), ("thickness"))
self.addConstraint(("left", "width"), ("crosswidth"))
self.addConstraint(("left", "length"), ("leftlength"))
self.addConstraint(("right", "depth"), ("thickness"))
self.addConstraint(("right", "width"), ("crosswidth"))
self.addConstraint(("right", "length"), ("rightlength"))
self.addConstraint(("t", "thickness"), "thickness")
self.addConstraint(("t", "crosswidth"), "crosswidth")
self.addConstraint(("t", "stemwidth"), "stemwidth")
def defConnections(self):
self.addConnection(("t", "leftedge"),
("left", "botedge.0.3"), "Flat")
self.addConnection(("t", "rightedge"),
("right", "topedge.0.3"), "Flat")
self.addConnection(("t", "stemedge"),
("stem", "topedge.1"),
"Fold", angle=(-70.5/2))
# XXX Not well shaped -- leaves overhang
self.addConnection(("t", "stemtab"),
("stem", "topedge.3"),
"Tab",
name="tab", depth=10, angle=(-70.5/2))
def defInterfaces(self):
# Define interface locations in terms of subcomponent interfaces
self.inheritInterface("stemedge", ("stem", "botedge"))
self.inheritInterface("lefttab", ("left", "tabedge"))
self.inheritInterface("leftedge", ("left", "topedge"))
self.inheritInterface("rightedge", ("right", "botedge"))
if __name__ == "__main__":
# Instantiate new object
f = Fulcrum()
# Define free parameters
f.setParameter("thickness", 10)
f.setParameter("stemwidth", 20)
f.setParameter("crosswidth", 30)
f.setParameter("leftlength", 50)
f.setParameter("rightlength", 100)
# Generate outputs
f.make()
f.drawing.graph.toSTL("output/tbar.stl")
f.drawing.transform(relative=(0,0))
import utils.display
utils.display.displayTkinter(f.drawing)
| 35.79798 | 97 | 0.638544 |
7797ba9b8e78f7c518aa5d04ea99606c72938a01
| 1,146 |
py
|
Python
|
project/forms/admin_unit_member.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1 |
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
project/forms/admin_unit_member.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286 |
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
project/forms/admin_unit_member.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
from flask_babelex import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import SubmitField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired
from project.forms.widgets import MultiCheckboxField
class InviteAdminUnitMemberForm(FlaskForm):
email = EmailField(lazy_gettext("Email"), validators=[DataRequired()])
roles = MultiCheckboxField(lazy_gettext("Roles"))
submit = SubmitField(lazy_gettext("Invite"))
class NegotiateAdminUnitMemberInvitationForm(FlaskForm):
accept = SubmitField(lazy_gettext("Accept"))
decline = SubmitField(lazy_gettext("Decline"))
class DeleteAdminUnitInvitationForm(FlaskForm):
submit = SubmitField(lazy_gettext("Delete invitation"))
email = EmailField(lazy_gettext("Email"), validators=[DataRequired()])
class DeleteAdminUnitMemberForm(FlaskForm):
submit = SubmitField(lazy_gettext("Delete member"))
email = EmailField(lazy_gettext("Email"), validators=[DataRequired()])
class UpdateAdminUnitMemberForm(FlaskForm):
roles = MultiCheckboxField(lazy_gettext("Roles"))
submit = SubmitField(lazy_gettext("Update member"))
| 33.705882 | 74 | 0.787958 |
24987bca0ce019f2e50599e070edc7e7f513338a
| 240 |
py
|
Python
|
exercises/ja/exc_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Disable the parser
with ____.____(____):
    # Process the text
doc = ____
    # Print the named entities in the doc
print(____)
| 16 | 42 | 0.691667 |
700381ccb012800fd290fe6f050f23dcad6553fa
| 878 |
py
|
Python
|
gshiw/quotes_web/quotes/adminx.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
gshiw/quotes_web/quotes/adminx.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
gshiw/quotes_web/quotes/adminx.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from quotes_web.adminx import BaseAdmin
import xadmin
from .models import Quotes, Categories, Works, Writers, Speakers, Topics
class QuotesAdmin(BaseAdmin):
exclude = ('owner', 'view_nums', 'dig_nums')
xadmin.site.register(Quotes, QuotesAdmin)
class CategoryAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Categories, CategoryAdmin)
class WorkAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Works, WorkAdmin)
class WriterAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Writers, WriterAdmin)
class SpeakerAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Speakers, SpeakerAdmin)
class TopicAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Topics, TopicAdmin)
| 22.512821 | 72 | 0.741458 |
7026c91c039c9c3b05345c27e403b5cd3b5c3e50
| 1,286 |
py
|
Python
|
INBa/2015/Mitin_D_S/task_8_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Mitin_D_S/task_8_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Mitin_D_S/task_8_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 8. Variant 15.
# Extend the "Anagrams" game (see M. Dawson, "Python Programming for the Absolute Beginner", ch. 4) so that every word comes with a hint.
# The player should earn the right to a hint when they have no guesses at all.
# Design a scoring system in which players who guess a word without a hint score more than those who asked for one.
# Mitin D.S.
# 19.04.2016, 11:08
import random
ochki = 500000
slova = ("python", "programming", "computer", "university", "russia", "security", "informatics")
zagadka=random.choice(slova)
proverka = zagadka
i=0
jumble = ""
while zagadka:
    bykva = random.randrange(len(zagadka))
    jumble += zagadka[bykva]
    zagadka = zagadka[:bykva] + zagadka[(bykva+1):]
print("You're on the 'Anagrams' show")
print("The scrambled word is: ", jumble)
slovo = input ("Your answer: ")
while (slovo != proverka):
    if(slovo == "don't know"):
        print(i,"letter: ",proverka[i])
        i+=1
    if ochki <= 0:
        break
    slovo=input("Wrong. Try again: ")
    ochki-=50000
if slovo == proverka:
    print("\nCorrect! The word is: ", proverka)
    print("You scored",ochki," points! Congratulations!")
else:
    print("Unfortunately you have 0 points and you lost :( The word was:",proverka)
input ("Press ENTER to continue")
| 34.756757 | 139 | 0.717729 |
7043ae6313fef14181ab3cef6e00e0df705b57cc
| 2,149 |
py
|
Python
|
utilities/HSV_detection.py
|
jlittek/Anki-Vector
|
1478885955dc142c70d92c6a9e24ef9e8fd5cb18
|
[
"MIT"
] | null | null | null |
utilities/HSV_detection.py
|
jlittek/Anki-Vector
|
1478885955dc142c70d92c6a9e24ef9e8fd5cb18
|
[
"MIT"
] | null | null | null |
utilities/HSV_detection.py
|
jlittek/Anki-Vector
|
1478885955dc142c70d92c6a9e24ef9e8fd5cb18
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import anki_vector
from anki_vector.util import distance_mm, speed_mmps, degrees
def empty(a):
pass
robot=anki_vector.Robot()
robot.connect()
robot.camera.init_camera_feed()
robot.behavior.set_lift_height(0.0)
robot.behavior.set_head_angle(degrees(0))
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 640, 600)
cv2.createTrackbar("Hue Min", "TrackBars", 10, 179, empty)
cv2.createTrackbar("Hue Max", "TrackBars", 47, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 66, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 186, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 171, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
while True:
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
img = np.array(robot.camera.latest_image.raw_image)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
imgBlur = cv2.GaussianBlur(img, (3,3), 1)
imgHSV = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2HSV)
print(h_min, h_max, s_min, s_max, v_min, v_max)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHSV, lower, upper)
# Alternative method to find the Ball: Approximation of the area with a Polygon.
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02*peri,True)
objCor = len(approx) # Number of corners
print(objCor)
x, y, w, h = cv2.boundingRect(approx)
if objCor > 6:
cv2.circle(img, center=(int(x+w/2), int(y+h/2)), radius=int((h)/2), color=(0, 255, 0), thickness=3)
cv2.imshow("Camera", img)
cv2.imshow("Mask", mask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
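# Teardown sketch (added): release the camera feed and the connection once the
# loop exits; close_camera_feed() and disconnect() are part of the anki_vector
# Robot API used above.
robot.camera.close_camera_feed()
robot.disconnect()
cv2.destroyAllWindows()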
| 35.229508 | 111 | 0.674267 |
567805096414549ca01eb464e1dd6912f98739a2
| 267 |
py
|
Python
|
DataProcess/config.py
|
zhangupkai/RFID_Script
|
9e05fad86e71dc6bd5dd12650d369f13d5a835c8
|
[
"MIT"
] | null | null | null |
DataProcess/config.py
|
zhangupkai/RFID_Script
|
9e05fad86e71dc6bd5dd12650d369f13d5a835c8
|
[
"MIT"
] | null | null | null |
DataProcess/config.py
|
zhangupkai/RFID_Script
|
9e05fad86e71dc6bd5dd12650d369f13d5a835c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :DataProcess
@File :config.py
@Author :Zhang Qihang
@Date :2021/11/8 13:21
"""
READ_PRINT_FILES_PATH = "../data/read_print"
HOP_FILES_PATH = "../data/hop"
DELTA = 0
REFER_CHANNEL = 923.125
HAMPEL = 8
| 17.8 | 44 | 0.655431 |
569488138911262f4b1392cb7a8e883d673af59a
| 312 |
py
|
Python
|
exercises/es/test_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/test_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/test_03_14_03.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    assert (
        "patterns = list(nlp.pipe(people))" in __solution__
    ), "Are you using nlp.pipe wrapped in a list?"
    __msg__.good(
        "Good job! Now let's continue with a practical example that uses nlp.pipe "
        "to process documents with extra metadata."
    )
| 31.2 | 84 | 0.653846 |
3b0cf254e84996d64feead20ef75a0dde786add0
| 100 |
py
|
Python
|
py/jpy/ci/appveyor/dump-dlls.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 210 |
2015-03-19T14:07:16.000Z
|
2022-03-31T19:28:13.000Z
|
py/jpy/ci/appveyor/dump-dlls.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 943 |
2021-05-10T14:00:02.000Z
|
2022-03-31T21:28:15.000Z
|
py/jpy/ci/appveyor/dump-dlls.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 40 |
2015-10-17T13:53:41.000Z
|
2021-07-18T20:09:11.000Z
|
import psutil, os
p = psutil.Process(os.getpid())
for dll in p.memory_maps():
print(dll.path)
| 14.285714 | 31 | 0.68 |
8dc1a5cf9af6d9450e0358bd13bcee9c29be28ac
| 2,328 |
py
|
Python
|
storage/baidu_cloud.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
storage/baidu_cloud.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
storage/baidu_cloud.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
bypy: the first run requires authorization — just run any command (e.g. `bypy info`)
and follow the instructions (log in, etc.) to authorize.
Authorization is needed only once; after it succeeds, the prompt will not appear again.
For details on any command: bypy help <command>
List the files under the app's root directory in the cloud drive: bypy list
Sync the current directory up to the cloud drive: bypy syncup or bypy upload
Sync the cloud drive contents down to the local machine: bypy syncdown or bypy downdir /
Compare the local current directory with the app's root directory in the cloud drive (very useful): bypy compare
Run bypy itself to see more commands and detailed explanations.
Debugging:
add -v at run time to show detailed progress;
add -d to show some debug information;
add -ddd to additionally show the HTTP traffic (warning: very verbose).
To share or exchange experience, please head to the wiki.
"""
from bypy import ByPy#,gui
import os,sys,psutil
from service import png
"""-------------------------------------------------------------------------"""
_paths=["/media/sfd/1CEE36D0EE36A1C6/core/","/media/sfd/LENOVO/SFD_assistant/core/"]
for _path in _paths:
if os.path.exists(_path):
base_path=_path
externalPaths=[base_path + 'basic_linux' ,
base_path + 'intelligent_device' ,
base_path + 'knowledge_continue' ,
]
#os.listdir(externalPaths[0])
"""-------------------------------------------------------------------------"""
def make_paths(paths=None):  # used to bootstrap the development environment
    if paths:
        for path in paths:
            if not os.path.exists(path):
                os.makedirs(path)
        return "paths created successfully"
    else:
        return "please pass a list of paths"
#make_paths(externalPaths)
"""-------------------------------------------------------------------------"""
def test():
bp=ByPy()
bp.list("basic_linux/") # or whatever instance methods of ByPy class
bp.syncup(base_path)
    bp.syncdown("/apps/bypy", base_path)
bp.downfile("basic_linux/wps-office_10.1.0.6634_amd64.deb",externalPaths[0])
bp.downfile("basic_linux/can_google.crx",externalPaths[0])
#gui.BypyGui()
"""-------------------------------------------------------------------------"""
def qpython_sync(current_dir="/*/",file_name="*.py"):
move_path="/run/user/1000/gvfs/mtp:host=%5Busb%3A001%2C002%5D/Internal storage/qpython"
sourceFile=os.getcwd()+"/storage/emulated/0/qpython"+current_dir+file_name
targetFile=move_path+current_dir+file_name
if os.path.isfile(sourceFile):
with open(sourceFile, "rb") as source:
with open(targetFile, "wb") as copy:
copy.write(source.read())
print("copy success")
| 32.788732 | 92 | 0.561856 |
c63ad1f80a2cb5614fe8283a636d42a91bd2065a
| 612 |
py
|
Python
|
radiomics/utility.py
|
RimeT/p3_radio
|
3d522a4356c62255cd93c6d74eb388a2e474dd00
|
[
"Apache-2.0"
] | null | null | null |
radiomics/utility.py
|
RimeT/p3_radio
|
3d522a4356c62255cd93c6d74eb388a2e474dd00
|
[
"Apache-2.0"
] | null | null | null |
radiomics/utility.py
|
RimeT/p3_radio
|
3d522a4356c62255cd93c6d74eb388a2e474dd00
|
[
"Apache-2.0"
] | null | null | null |
import logging
def get_logger(log_file=None, name='radiomics_logger'):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# stream handler will send message to stdout
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if log_file is not None:
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
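# Usage sketch (added for illustration): messages always go to stdout, and to
# the given file as well when `log_file` is set.
if __name__ == "__main__":
    logger = get_logger(log_file="radiomics.log")
    logger.info("feature extraction started")
    logger.debug("debug output also lands in radiomics.log")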
| 30.6 | 105 | 0.681373 |
cc45fbee123019327799d384f3210a7383d1d239
| 5,153 |
py
|
Python
|
20-hs-redez-sem/groups/05-decentGames/src/DontGetAngry.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
20-hs-redez-sem/groups/05-decentGames/src/DontGetAngry.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
20-hs-redez-sem/groups/05-decentGames/src/DontGetAngry.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
import copy
import json
import os
import random
import socket
import sys
import xmlrpc.client
import State
from AbsGame import AbsGame, MY_IP
from DGA import DGA
from Exceptions import FileAlreadyExists
from GameInformation import GameInformation
class DontGetAngry(AbsGame):
is_looping = True
def _sync_log(self) -> None:
pass
def fetch(self):
n = self.__ginfo.get_seq()
self._fetch_lines(self.__game_path, n, self.__ip1, self.__ip2)
def refresh(self):
print('Refreshing')
pass
# with xmlrpc.client.ServerProxy("http://%s:8001/" % self.__ip1) as proxy:
# file_string = proxy.is_even(self.__game_path)
#
# # Only refresh if it is the next sequence number
# if DGA(json.loads(file_string)).get_seq() == self.__ginfo.get_seq() + 1:
# with open(self.__game_path, 'w') as f:
# f.write(file_string + '\n')
# f.close()
# return
#
# with xmlrpc.client.ServerProxy("http://%s:8001/" % self.__ip2) as proxy:
# file_string = proxy.is_even(self.__game_path)
#
# # Only refresh if it is the next sequence number
# if DGA(json.loads(file_string)).get_seq() == self.__ginfo.get_seq() + 1:
# with open(self.__game_path, 'w') as f:
# f.write(file_string + '\n')
# f.close()
def __init__(self, game_id: str, ip1: str, ip2):
self.__game_id = game_id
self.__game_path = 'games/%s.dga' % game_id
self.__ip1 = ip1
self.__ip2 = ip2
self.__playable = False
self.__game_is_updated = False
if game_id is not None:
with open(self.__game_path, 'r') as f:
time, game_info = f.read().splitlines()[-1].split('$')
self.__ginfo: DGA = DGA(json.loads(game_info))
self.__curr_game = self.__ginfo.get_board()
if self._validate(self.__curr_game):
if not self.__ginfo.game_is_initiated():
if self.__ginfo.can_i_update():
self._update()
print('Game is leaving the loop')
self.is_looping = False
if self.__ginfo.get_player(self._get_turn_of()) == self.get_who_am_i()\
and self.get_ginfo().get_status() == State.ONGOING:
self.__playable = True
else:
print('Not validated?')
def get_turn_of(self) -> str:
p = self._get_turn_of()
return p + ': ' + self.__ginfo.get_player(p)
def get_who_am_i(self) -> str:
return list(self.__ginfo.get_dic().keys())[list(self.__ginfo.get_dic().values()).index(self.__ginfo.get_mac())]
def get_allowed_moves(self):
return [1, 2, 3, 4, 5, 6]
def move(self, move: str):
move = random.randint(1, 6)
if self._get_playable():
self.__ginfo.apply_move(move)
self.get_ginfo().inc_seq()
self._update()
self._set_playable(False)
else:
print('You cannot make a move.')
def get_ginfo(self):
return self.__ginfo
def forfeit(self):
return 'Not possible in this game'
def _get_playable(self):
return self.__playable
def _set_playable(self, state: bool):
self.__playable = state
def _update(self) -> None:
with open(self.__game_path, 'a') as f:
f.write(self.get_time() + str(self.__ginfo) + '\n')
f.close()
self.ping_the_updates(self.__game_path, self.__ip1, self.__ip2, MY_IP)
def _validate(self, curr_board: dict) -> bool:
with open(self.__game_path, 'r')as f:
lines = f.read().splitlines()
second_last_line = lines[-2]
prev_ginfo = DGA(json.loads(second_last_line.split('$')[1]))
# Check if same file/string
if str(self.__ginfo) == str(prev_ginfo):
print('Absolute same string')
self.__game_is_updated = False
return True
prev_board = prev_ginfo.get_board()
# Check only board
if str(prev_board) == str(curr_board):
print('Same board, but other things changed')
self.__game_is_updated = True
return True
# Check if moves before were legit
for move in self.get_allowed_moves():
tmp: DGA = copy.deepcopy(prev_ginfo)
tmp.apply_move(str(move))
if str(tmp.get_board()) == str(curr_board):
self.__game_is_updated = True
print('Valid move was made: %s' % move)
return True
print('An opponent seems to be cheating... Game aborted.')
self.__ginfo.set_status(State.CHEATED)
self.__ginfo.inc_seq()
self._update()
print(self.__ginfo.get_status())
return False
def _get_turn_of(self) -> str:
return self.__ginfo.get_playing_rn()
def _get_game_id(self) -> str:
return self.__game_id
def get_board(self) -> dict:
return self.__curr_game
| 32.408805 | 119 | 0.577334 |
ccc94b51bdd15e2bdac688d0a638f51191e25921
| 85 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/deb.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/deb.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/deb.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
a = input("enter a number")
if a > 0:
    print "a is positive"
else:
    print "a is not positive"
| 17 | 23 | 0.635294 |
aed0bbea08182958a4aa6ca4ca13d7c219a63c2c
| 310 |
py
|
Python
|
Problems/Two Pointers/easy/ReverseOnlyLetters/reverse_only_letters.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Two Pointers/easy/ReverseOnlyLetters/reverse_only_letters.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Two Pointers/easy/ReverseOnlyLetters/reverse_only_letters.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
def reverseOnlyLetters(s: str) -> str:
s = list(s)
l, r = 0, len(s) - 1
while l < r:
if not s[l].isalpha():
l += 1
elif not s[r].isalpha():
r -= 1
else:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
return ''.join(s)
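# Usage sketch (added for illustration): only letters are swapped by the two
# pointers; '-' and digits keep their positions.
if __name__ == "__main__":
    print(reverseOnlyLetters("ab-cd"))          # dc-ba
    print(reverseOnlyLetters("a-bC-dEf-ghIj"))  # j-Ih-gfE-dCba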
| 20.666667 | 38 | 0.354839 |
ee8e723ac890839db5dae21fc3e12e45aceff55c
| 117 |
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-008/ph-8.11-uppercase-lowercase-capitalize.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-008/ph-8.11-uppercase-lowercase-capitalize.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-008/ph-8.11-uppercase-lowercase-capitalize.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
s1 = "Bangladesh"
s_up = s1.upper()
print(s_up)
s_lo = s1.lower()
print(s_lo)
s_cap = s1.capitalize()
print(s_cap)
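# Note (added): for caseless comparisons str.casefold() is the aggressive
# variant of lower(), e.g. "ß".casefold() == "ss".
print(s1.casefold())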
| 11.7 | 23 | 0.675214 |
4e442ebd5ed6d464aa6b0b32453f08df31ee878b
| 1,284 |
py
|
Python
|
haas_lib_bundles/python/docs/examples/temperature_humidity/haas506/code/gxht30.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/temperature_humidity/haas506/code/gxht30.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/temperature_humidity/haas506/code/gxht30.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
from driver import I2C
class GXHT30(object):
# init i2cDev
def __init__(self,i2cObj):
self.i2cObj=None
if not isinstance(i2cObj,I2C):
raise ValueError("parameter is not an I2C object")
self.i2cObj=i2cObj
# write cmd to register
    # commands: 0x2C, 0x06
def write(self,cmd1,cmd2):
writeBuf=bytearray([cmd1,cmd2])
self.i2cObj.write(writeBuf,2)
# read data from register
    # read data starting at: 0x00
    # len(data) is 6 bytes: cTemp MSB, cTemp LSB, cTemp CRC, Humidity MSB, Humidity LSB, Humidity CRC
def read(self,cmd,len):
readBuf=bytearray(len)
readBuf[0]=cmd
self.i2cObj.read(readBuf,6)
return readBuf
# convert the data
    def convert_data(self, data):
cTemp = ((((data[0] * 256.0) + data[1]) * 175) / 65535.0) - 45
fTemp = cTemp * 1.8 + 32
humidity = 100 * (data[3] * 256 + data[4]) / 65535.0
return cTemp,fTemp,humidity
# measure temperature and humidity
def measure(self):
if self.i2cObj is None:
raise ValueError("invalid I2C object")
self.write(0x2c,0x06)
data=self.read(0x00,6)
        cTemp, fTemp, humidity = self.convert_data(data)
return cTemp,fTemp,humidity
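# Usage sketch (added for illustration): the device/node name "gxht30" and the
# open()/close() calls on driver.I2C follow the HaaS board-config convention
# and are assumptions, not part of the original driver.
if __name__ == "__main__":
    i2c = I2C()
    i2c.open("gxht30")  # assumed board-config node name
    sensor = GXHT30(i2c)
    cTemp, fTemp, humidity = sensor.measure()
    print("temperature: %.2f C / %.2f F, humidity: %.2f %%" % (cTemp, fTemp, humidity))
    i2c.close()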
| 29.860465 | 105 | 0.60514 |
14d6c765813b314fef595ce2c9a7fb5f971b579b
| 4,879 |
py
|
Python
|
python/en/archive/dropbox/miscellaneous_python_files/data4models_old_old.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/miscellaneous_python_files/data4models_old_old.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/miscellaneous_python_files/data4models_old_old.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
data4models.py
# Sentiment Identification for Roman Urdu
'''
import numpy as np
import pandas as pd
class Data:
# Constructor
def __init__( self, config ):
self.config = config
def split( self, df ):
'''
Split the (entire) data into training data & test data
'''
assert isinstance( df, pd.DataFrame), 'df must be a pandas.DataFrame.'
test_split_ratio = self.config.test_split_ratio
print(f'Data.preprocess.split: test_split_ratio= {test_split_ratio}' )
reviews = df['review']
sentiments = df['sentiment']
n_dataset = df.shape[0]
n_test = int( n_dataset * test_split_ratio ) # 0.7
n_training = n_dataset - n_test # 0.3
# Use indexcing to split the data.
index_data = np.arange( n_dataset )
index_training = np.random.choice( index_data, n_training, replace=False )
index_test = np.delete( index_data, index_training )
data_training_np = reviews.loc[ index_training ].values
data_test_np = reviews.loc[ index_test ].values
labels_training_np = sentiments.loc[ index_training ].values
labels_test_np = sentiments.loc[ index_test ].values
print(f' number of dataset =', n_dataset )
print(f' np.shape(x_train) =', np.shape(data_training_np) )
print(f' np.shape(y_train) =', np.shape(labels_training_np) )
print(f' np.shape(x_test) =', np.shape(data_test_np) )
print(f' np.shape(y_test) =', np.shape(labels_test_np) )
return data_training_np, labels_training_np, data_test_np, labels_test_np
# x_train, y_train, x_test, y_test
# def __init__( self, x, y, config ):
# self.config = config
# self.x = x # shape = (length, dimension)
# self.y = y # shape = (length,)
def split( self, split_rate=[0.7, 0.2, 0.1] ):
'''
The default ratio to split the training, evaluation, & test data is 7:2:1.
'''
print( 'split_rate = ', split_rate )
length, dimension = np.shape( self.x )
# Split the (entire) data into training data & test data
n_training = int( length * split_rate[0] ) # 0.7
n_evaluation = int( length * split_rate[1] ) # 0.2
n_test = length - n_training - n_evaluation
# Use indexcing to split the data.
index_data = np.arange( length ) # 13704, [0, length-1]
index_training = np.random.choice( index_data, n_training, replace=False ) # 9592
index_temp = np.delete( index_data, index_training ) # 4112
index_evaluation = np.random.choice( index_temp, n_evaluation ) # 2740
index_test = np.delete( index_temp, index_evaluation ) # 3547, This must be 1372!
data_training = self.x[ index_training, : ]
data_evaluation = self.x[ index_evaluation, : ]
data_test = self.x[ index_test, : ]
labels_training = self.y[ index_training ]
labels_evaluation = self.y[ index_evaluation ]
labels_test = self.y[ index_test ]
training = [data_training, labels_training]
evaluation = [data_evaluation, labels_evaluation]
test = [data_test, labels_test]
return training, evaluation, test
# #=====================================================================#
# # The above variables don't have the leading self. to improve readability.
# self.length = length # = size, or n_data
# self.dimension = dimension
#
# self.n_training = n_training
# self.n_test = n_test
def load(self, batch_size):
data_length = len( self.data_training )
if data_length >= batch_size:
# Because of replace=False,
# ValueError: Cannot take a larger sample than population when 'replace=False'
index = np.random.choice( data_length, batch_size, replace=False )
data = self.data_training[ index,: ]
labels = self.labels_training[ index ]
self.data_training = np.delete( self.data_training, index, axis=0 )
self.labels_training = np.delete( self.labels_training, index )
done = True
        else: # data_length < batch_size: refill the training pool, then draw a fresh batch
            self.data_training = self.x[ self.index_training ]
            self.labels_training = self.y[ self.index_training ]
            index = np.random.choice( len(self.data_training), batch_size, replace=False )
            data = self.data_training[ index,: ]
            labels = self.labels_training[ index ]
            done = False
return data, labels, done
# EOF
| 39.991803 | 97 | 0.562615 |
09a66417b9701135332178e35c8c492d4dbc91c1
| 10,661 |
py
|
Python
|
research/cv/metric_learn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/metric_learn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/metric_learn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train resnet."""
import os
import time
import argparse
import ast
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore.communication.management import init
from mindspore.train.callback import Callback
from src.loss import Softmaxloss
from src.loss import Tripletloss
from src.loss import Quadrupletloss
from src.lr_generator import get_lr
from src.resnet import resnet50
from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet
set_seed(1)
parser = argparse.ArgumentParser(description='Image classification')
# modelarts parameter
parser.add_argument('--train_url', type=str, default=None, help='Train output path')
parser.add_argument('--data_url', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_url', type=str, default=None, help='Pretrained ckpt path')
parser.add_argument('--checkpoint_name', type=str, default='resnet-120_625.ckpt', help='Checkpoint file')
parser.add_argument('--loss_name', type=str, default='softmax',
help='loss name: softmax(pretrained) triplet quadruplet')
# Ascend parameter
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_path', type=str, default=None, help='ckpt path name')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_id', type=int, default=0, help='Device id')
parser.add_argument('--run_modelarts', type=ast.literal_eval, default=False, help='Run distribute')
args_opt = parser.parse_args()
class Monitor(Callback):
"""Monitor"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def epoch_begin(self, run_context):
self.losses = []
self.epoch_time = time.time()
dataset_generator.__init__(data_dir=DATA_DIR, train_list=TRAIN_LIST)
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / cb_params.batch_num
print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:8.5f}"
.format(epoch_mseconds, per_step_mseconds, np.mean(self.losses)))
print('batch_size:', config.batch_size, 'epochs_size:', config.epoch_size,
'lr_model:', config.lr_decay_mode, 'lr:', config.lr_max, 'step_size:', step_size)
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
"""step_end"""
cb_params = run_context.original_args()
step_mseconds = (time.time() - self.step_time) * 1000
step_loss = cb_params.net_outputs
if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
step_loss = step_loss[0]
if isinstance(step_loss, Tensor):
step_loss = np.mean(step_loss.asnumpy())
self.losses.append(step_loss)
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
print("epochs: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:8.5f}/{:8.5f}], time:[{:5.3f}], lr:[{:8.5f}]".format(
cb_params.cur_epoch_num, config.epoch_size, cur_step_in_epoch, cb_params.batch_num, step_loss,
np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))
if __name__ == '__main__':
if args_opt.loss_name == 'softmax':
from src.config import config0 as config
from src.dataset import create_dataset0 as create_dataset
elif args_opt.loss_name == 'triplet':
from src.config import config1 as config
from src.dataset import create_dataset1 as create_dataset
elif args_opt.loss_name == 'quadruplet':
from src.config import config2 as config
from src.dataset import create_dataset1 as create_dataset
else:
        print('unsupported loss name:', args_opt.loss_name)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
# init distributed
if args_opt.run_modelarts:
import moxing as mox
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
local_data_url = '/cache/data'
local_ckpt_url = '/cache/ckpt'
local_train_url = '/cache/train'
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
local_data_url = os.path.join(local_data_url, str(device_id))
local_ckpt_url = os.path.join(local_ckpt_url, str(device_id))
mox.file.copy_parallel(args_opt.data_url, local_data_url)
mox.file.copy_parallel(args_opt.ckpt_url, local_ckpt_url)
DATA_DIR = local_data_url + '/'
else:
if args_opt.run_distribute:
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
else:
context.set_context(device_id=args_opt.device_id)
device_num = 1
device_id = args_opt.device_id
DATA_DIR = args_opt.dataset_path + '/'
# create dataset
TRAIN_LIST = DATA_DIR + 'train_half.txt'
if args_opt.loss_name == 'softmax':
dataset_generator = GetDatasetGenerator_softmax(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'triplet':
dataset_generator = GetDatasetGenerator_triplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'quadruplet':
dataset_generator = GetDatasetGenerator_quadruplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
else:
        print('unsupported loss name:', args_opt.loss_name)
dataset = create_dataset(dataset_generator, do_train=True, batch_size=config.batch_size,
device_num=device_num, rank_id=device_id)
step_size = dataset.get_dataset_size()
# define net
net = resnet50(class_num=config.class_num)
# init weight
if args_opt.run_modelarts:
checkpoint_path = os.path.join(local_ckpt_url, args_opt.checkpoint_name)
else:
checkpoint_path = args_opt.ckpt_path
param_dict = load_checkpoint(checkpoint_path)
load_param_into_net(net.backbone, param_dict)
# init lr
lr = Tensor(get_lr(lr_init=config.lr_init,
lr_end=config.lr_end,
lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs,
total_epochs=config.epoch_size,
steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode))
# define opt
opt = Momentum(params=net.trainable_params(),
learning_rate=lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
loss_scale=config.loss_scale)
# define loss, model
if args_opt.loss_name == 'softmax':
loss = Softmaxloss(sparse=True, smooth_factor=0.1, num_classes=config.class_num)
elif args_opt.loss_name == 'triplet':
loss = Tripletloss(margin=0.1)
elif args_opt.loss_name == 'quadruplet':
loss = Quadrupletloss(train_batch_size=config.batch_size, samples_each_class=2, margin=0.1)
else:
        print('unsupported loss name:', args_opt.loss_name)
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
if args_opt.loss_name == 'softmax':
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
else:
model = Model(net.backbone, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
    # define callbacks
cb = []
if config.save_checkpoint and (device_num == 1 or device_id == 0):
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
check_name = 'ResNet50_' + args_opt.loss_name
if args_opt.run_modelarts:
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=local_train_url, config=config_ck)
else:
save_ckpt_path = os.path.join(config.save_checkpoint_path, 'model_'+ str(device_id) +'/')
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=save_ckpt_path, config=config_ck)
cb += [ckpt_cb]
cb += [Monitor(lr_init=lr.asnumpy())]
# train model
model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if args_opt.run_modelarts and config.save_checkpoint and (device_num == 1 or device_id == 0):
mox.file.copy_parallel(src_url=local_train_url, dst_url=args_opt.train_url)
| 47.59375 | 120 | 0.670294 |
09fe26b416f8f39b1bc3594cc62188984305284f
| 8,296 |
py
|
Python
|
test/test_npu/test_network_ops/test_not_equal.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_not_equal.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_not_equal.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNotEqual(TestCase):
def cpu_op_exec(self, input1, input2):
output = torch.ne(input1, input2)
output = output.numpy().astype(np.int32)
return output
def npu_op_exec(self, input1, input2):
output = torch.ne(input1, input2)
output = output.to("cpu")
output = output.numpy().astype(np.int32)
return output
def cpu_op_inplace_exec(self, input1, input2):
input1.ne_(input2)
output = input1.numpy().astype(np.int32)
return output
def npu_op_inplace_exec(self, input1, input2):
input1.ne_(input2)
output = input1.to("cpu")
output = output.numpy().astype(np.int32)
return output
def npu_op_exec_out(self, input1, input2, out):
torch.ne(input1, input2, out=out)
output = out.to("cpu")
output = output.numpy().astype(np.int32)
return output
def not_equal_scalar_result(self, shape_format):
for item in shape_format:
scalar = np.random.uniform(0, 100)
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
npu_input3 = copy.deepcopy(cpu_input1).to("npu").to(torch.bool)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1, scalar)
npu_output = self.npu_op_exec(npu_input1, scalar)
npu_output_out = self.npu_op_exec_out(npu_input1, scalar, npu_input3)
cpu_output_inp = self.cpu_op_inplace_exec(cpu_input1, scalar)
npu_output_inp = self.npu_op_inplace_exec(npu_input1, scalar)
self.assertRtolEqual(cpu_output, npu_output)
self.assertRtolEqual(cpu_output, npu_output_out)
self.assertRtolEqual(cpu_output_inp, npu_output_inp)
def not_equal_result(self, shape_format):
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
cpu_input2, npu_input2 = create_common_tensor(item[1], 0, 100)
npu_input3 = copy.deepcopy(cpu_input1).to("npu").to(torch.bool)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_input2 = cpu_input2.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
npu_output_out = self.npu_op_exec_out(npu_input1, npu_input2, npu_input3)
cpu_output_inp = self.cpu_op_inplace_exec(cpu_input1, cpu_input2)
npu_output_inp = self.npu_op_inplace_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
self.assertRtolEqual(cpu_output, npu_output_out)
self.assertRtolEqual(cpu_output_inp, npu_output_inp)
def test_not_equal_shape_format_fp16_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [16]], [np.float16, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [16]], [np.float32, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [448, 1]], [np.float16, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [448, 1]], [np.float32, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [16, 640, 640]], [np.float16, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_3d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [16, 640, 640]], [np.float32, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_4d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float16, i, [32, 3, 3, 3]], [np.float16, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_4d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [32, 3, 3, 3]], [np.float32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
    # scalar -----------------------------------------------------------------
def test_not_equal_scalar_shape_format_fp16_1d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float16, i, 18]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_1d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [18]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp16_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [64, 7]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [64, 7]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [64, 24, 38]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp16_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_shape_format_int32_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [16]], [np.int32, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [448, 1]], [np.int32, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [16, 640, 640]], [np.int32, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [32, 3, 3, 3]], [np.int32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
instantiate_device_type_tests(TestNotEqual, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 43.434555 | 112 | 0.659595 |
115c2696ff108c7f748acdd64479d1a4247e058f
| 501 |
py
|
Python
|
SQLFileManager.py
|
whde/Movie
|
5d712642242042b0fa2e43f526605def9a6a4343
|
[
"MIT"
] | 1 |
2018-12-03T06:08:46.000Z
|
2018-12-03T06:08:46.000Z
|
SQLFileManager.py
|
whde/Movie
|
5d712642242042b0fa2e43f526605def9a6a4343
|
[
"MIT"
] | null | null | null |
SQLFileManager.py
|
whde/Movie
|
5d712642242042b0fa2e43f526605def9a6a4343
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import threading
mu = threading.Lock()
def create_sql_file():
    # Create (or truncate) the file and close the handle right away.
    open('sql.txt', 'w+', encoding='utf-8').close()
def lock_test(sql):
    # The context manager guarantees the lock is released even if the write raises.
    with mu:
        write_to_file(sql)
def write_to_file(sql):
    fp = open('sql.txt', 'a+', encoding='utf-8')
    print('write start!')
    try:
        fp.write(sql)
    finally:
        fp.close()
    print('write finish!')
def read_sql_file():
    with open('sql.txt', 'r', encoding='utf-8') as fp:
        return fp.read()
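if __name__ == '__main__':
    # A minimal concurrency sketch: a few threads appending through
    # lock_test(), so the writes to sql.txt are serialised by the lock.
    create_sql_file()
    threads = [threading.Thread(target=lock_test, args=('INSERT %d;\n' % i,))
               for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(read_sql_file())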
| 14.735294 | 43 | 0.556886 |
febb379b1fadcfc13939427c562a9743628bb217
| 290 |
py
|
Python
|
packages/watchmen-auth/src/watchmen_auth/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-auth/src/watchmen_auth/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-auth/src/watchmen_auth/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from .auth_helper import authorize, authorize_token
from .authentication import AuthenticationDetails, AuthenticationManager, AuthenticationProvider, AuthenticationScheme
from .authorization import AuthFailOn401, AuthFailOn403, Authorization
from .principal_service import PrincipalService
| 58 | 118 | 0.889655 |
fedbbb8d9bc97ca7bc07bcf42748c00fe9cb014e
| 150 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/24.zip.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/24.zip.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/24.zip.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
names = ["Navin", "Kiran", "Harsh", "Navin"]
comps = ["Dell", "Apple", "MS", "Dell"]
zipped = zip(names, comps)
for a, b in zipped:
print(a, b)
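# Note that zip() returns a one-shot iterator: `zipped` is exhausted by the
# loop above, so a second pass over it would print nothing. Re-create it (or
# materialise it with list()) when the pairs are needed again.
print(list(zip(names, comps)))  # [('Navin', 'Dell'), ('Kiran', 'Apple'), ('Harsh', 'MS'), ('Navin', 'Dell')]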
| 18.75 | 44 | 0.56 |
3a6f4b18547be01116e98140fccd762c654faa91
| 1,881 |
py
|
Python
|
src/bo4e/com/zeitreihenwert.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/com/zeitreihenwert.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/com/zeitreihenwert.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
Contains Zeitreihenwert class
and corresponding marshmallow schema for de-/serialization
"""
from datetime import datetime
import attr
from marshmallow import fields
from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema
from bo4e.validators import check_bis_is_later_than_von
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Zeitreihenwert(Zeitreihenwertkompakt):
"""
    Representation of a time series value, consisting of a time span, a value and status information.
.. HINT::
`Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_
"""
# required attributes
datum_uhrzeit_von: datetime = attr.ib(
validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]
    ) #: Date and time (second resolution) at which the measurement interval started (inclusive)
datum_uhrzeit_bis: datetime = attr.ib(
validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]
    ) #: Date and time (second resolution) at which the measurement interval ends (exclusive)
def _get_inclusive_start(self) -> datetime:
"""return the inclusive start (used in the validator)"""
return self.datum_uhrzeit_von
def _get_exclusive_end(self) -> datetime:
"""return the exclusive end (used in the validator)"""
return self.datum_uhrzeit_bis
class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema):
"""
Schema for de-/serialization of Zeitreihenwert.
"""
class_name = Zeitreihenwert # type:ignore[assignment]
# required attributes
datum_uhrzeit_von = fields.DateTime(data_key="datumUhrzeitVon")
datum_uhrzeit_bis = fields.DateTime(data_key="datumUhrzeitBis")
| 36.882353 | 183 | 0.759702 |
c94972e43abc5815818c8d971961d98908db47f7
| 610 |
py
|
Python
|
elements/python/11/10/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11 |
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
elements/python/11/10/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1 |
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
elements/python/11/10/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
import heapq
import random
class Stack(object):
def __init__(self):
self.h = []
def push(self, x):
heapq.heappush(self.h, (-len(self.h), x))
def pop(self):
if self.empty():
return None
_, x = heapq.heappop(self.h)
return x
def empty(self):
return len(self.h) == 0
def test():
    items = [random.randrange(100) for _ in range(100)]
s = Stack()
for x in items:
s.push(x)
for x in reversed(items):
assert x == s.pop()
    print('pass')
def main():
test()
if __name__ == '__main__':
main()
| 16.052632 | 56 | 0.527869 |
42c69d359bed45586ffb7396c6fd890b5ac2e4f8
| 563 |
py
|
Python
|
Projekteuler/projecteuler_aufgabe001.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
Projekteuler/projecteuler_aufgabe001.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
Projekteuler/projecteuler_aufgabe001.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
"""
Aufgabe 1 aus http://projecteuler.net
(Deutsche Übersetzung auf http://projekteuler.de)
Wenn wir alle natürlichen Zahlen unter 10 auflisten, die Vielfache von 3
oder 5 sind, so erhalten wir 3, 5, 6 und 9. Die Summe dieser Vielfachen ist 23.
Finden Sie die Summe aller Vielfachen von 3 oder 5 unter 1000.
Lösungshilfe: Zerlege die Aufgabenstellung in die verschiedenen Teile. Löse erst
vereinfachte Aufgaben, z. B. Finde alle Vielfache von 3 unter 20. Nähere die Aufgabe Stück
für Stück der eigentlichen Fage an.
"""
summe = # Los gehts ...
print(summe)
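# Cross-check by inclusion-exclusion: the multiples of 3 below 1000 sum to
# 166833, the multiples of 5 to 99500, and the multiples of 15 (counted in
# both groups) to 33165, so the expected total is 166833 + 99500 - 33165 = 233168.
assert summe == 233168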
| 29.631579 | 90 | 0.765542 |
284b349d772a3697a416d1f85c88edc7197debdd
| 212 |
py
|
Python
|
DataStructures/LinkedList/CycleDetection.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
DataStructures/LinkedList/CycleDetection.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
DataStructures/LinkedList/CycleDetection.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
#coding:utf-8
def has_cycle(head):
    ptr1 = head  # fast pointer: advances two nodes per step
    ptr2 = head  # slow pointer: advances one node per step
    # Guard on the fast pointer: it is the one that can run off the end of
    # an acyclic list, which would otherwise raise an AttributeError here.
    while ptr1 and ptr1.next:
        ptr1 = ptr1.next.next
        ptr2 = ptr2.next
        if ptr1 is ptr2:
            return 1
    return 0
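if __name__ == '__main__':
    # Quick self-check with a minimal stand-in node type (the judge's real
    # node class is not shown in this snippet).
    class Node(object):
        def __init__(self, data):
            self.data = data
            self.next = None
    a, b, c = Node(1), Node(2), Node(3)
    a.next, b.next = b, c
    print(has_cycle(a))  # 0: acyclic chain a -> b -> c
    c.next = a           # close the loop
    print(has_cycle(a))  # 1: cycle detected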
| 17.666667 | 29 | 0.54717 |
288dbe0819f6ab670d4d780c584ed1ac0e60354f
| 165 |
py
|
Python
|
models/spec_analyse.py
|
zaqwes8811/voicegen
|
938e26d9e83c8be9df830698aa5b65cb904dd2eb
|
[
"Apache-2.0"
] | null | null | null |
models/spec_analyse.py
|
zaqwes8811/voicegen
|
938e26d9e83c8be9df830698aa5b65cb904dd2eb
|
[
"Apache-2.0"
] | null | null | null |
models/spec_analyse.py
|
zaqwes8811/voicegen
|
938e26d9e83c8be9df830698aa5b65cb904dd2eb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#-*- coding: utf-8 -*-
import wave as wv
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
| 20.625 | 31 | 0.648485 |
6c2171b07e9987cfb49540e503dc6107e2ef63fc
| 2,466 |
py
|
Python
|
spo/spo/doctype/medizinischer_bericht/medizinischer_bericht.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | null | null | null |
spo/spo/doctype/medizinischer_bericht/medizinischer_bericht.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | 6 |
2019-08-23T18:36:26.000Z
|
2019-11-12T13:12:12.000Z
|
spo/spo/doctype/medizinischer_bericht/medizinischer_bericht.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | 1 |
2021-08-14T22:22:43.000Z
|
2021-08-14T22:22:43.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import formatdate
class MedizinischerBericht(Document):
pass
# def validate(self):
# for ausgangslage in self.ausgangslage:
# if ausgangslage.krankengeschichte:
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("<br>", "")
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("</div>", "<br>")
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("<div>", "")
# if ausgangslage.bemerkung:
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("<br>", "")
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("</div>", "<br>")
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("<div>", "")
# for korrespondenz in self.korrespondenz:
# if korrespondenz.wortlaut:
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("<br>", "")
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("</div>", "<br>")
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("<div>", "")
# if korrespondenz.bemerkung:
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("<br>", "")
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("</div>", "<br>")
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("<div>", "")
@frappe.whitelist()
def get_deckblat_data(mandat):
data = {}
if mandat:
mandat = frappe.get_doc("Mandat", mandat)
if mandat.kontakt:
patienten_kontakt = frappe.get_doc("Contact", mandat.kontakt)
data["name_klient"] = patienten_kontakt.first_name + " " + patienten_kontakt.last_name
data["geburtsdatum_klient"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy')
else:
data["name_klient"] = ''
data["geburtsdatum_klient"] = ''
employee = frappe.db.sql("""SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'""".format(owner=frappe.session.user), as_dict=True)
if len(employee) > 0:
data["beraterin"] = employee[0].name
else:
data["beraterin"] = ''
if mandat.rsv:
data["rsv"] = mandat.rsv
else:
data["rsv"] = ''
if mandat.rsv_kontakt:
data["rsv_kontakt"] = mandat.rsv_kontakt
else:
data["rsv_kontakt"] = ''
return data
else:
return False
| 41.1 | 142 | 0.711273 |
9fd9cb4ef25ac783d9cb8e8694587b821db59b4d
| 3,371 |
py
|
Python
|
js/SFLIX/getfilepath.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
js/SFLIX/getfilepath.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
js/SFLIX/getfilepath.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
import os
from shutil import move as moveFile
os.chdir(os.getcwd())
print("".center(50, "="))
print("Update STEFFLIX-Daten".center(50))
print("".center(50, "="))
homeDir = os.getcwd()
allowedFileTypes = ["jpg", "jpeg", "mp4", "mp3", "png"]
disallowedItems = ["System Volume Information", "$RECYCLE.BIN", ".vscode", "sflix_sys"]
def recursiveCrawler(path, project="", serie="", staffel="", folge="", filelist=None, depth=0):
    if filelist is None:
        filelist = {}  # avoid the shared mutable-default-argument pitfall
    if depth == 0:
        pass
elif depth == 1:
project = path.split("\\")[-1]
filelist.setdefault(project, {})
elif depth == 2:
serie = path.split("\\")[-1]
filelist[project].setdefault(serie, {})
elif depth == 3:
staffel = path.split("\\")[-1]
filelist[project][serie].setdefault(staffel, {})
elif depth == 4:
folge = path.split("\\")[-1]
filelist[project][serie][staffel].setdefault(folge, {})
# print(f"{project} {serie} {staffel}")
folderContent = os.listdir(path)
for item in folderContent:
        if item not in disallowedItems:
if os.path.isfile(os.path.join(path, item)):
extension = item.split(".")[-1]
if extension in allowedFileTypes:
if depth == 1:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project].setdefault(os.path.join(".", relPath))
elif depth == 2:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie].setdefault(os.path.join(".", relPath))
elif depth == 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel].setdefault(os.path.join(".", relPath), None)
elif depth > 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel][folge].setdefault(os.path.join(".", relPath), None)
elif os.path.isdir(os.path.join(path, item)):
filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1)
return filelist
print("Durchsuche Ordner...".ljust(40), end="")
try:
filelist = recursiveCrawler(homeDir)
print("OK")
except Exception:
    filelist = {}
    print("Fehler")
# fileWriter = open(os.path.join(homeDir, "output.txt"), "w", encoding="utf-8")
# fileWriter.write(str(filelist).replace("\\\\", "/").replace("None", "null"))
# fileWriter.close()
try:
print("Erstelle Backup...".ljust(40), end="")
if os.path.exists(os.path.join(homeDir, "sflix_sys", "data.js.bak")):
os.remove(os.path.join(homeDir, "sflix_sys", "data.js.bak"))
moveFile(os.path.join(homeDir, "sflix_sys", "data.js"), os.path.join(homeDir, "sflix_sys", "data.js.bak"))
print("OK")
except Exception:
print("Fehler")
try:
print("Speichere neue Version...".ljust(40), end="")
fileWriter = open(os.path.join(homeDir, "sflix_sys", "data.js"), "w", encoding="utf-8")
fileWriter.write("var data = " + str(filelist).replace("\\\\", "/").replace("None", "null") + ";")
fileWriter.close()
print("OK")
except Exception:
print("Fehler")
print("".center(50, "="))
print("Update abgeschlossen".center(50))
print("".center(50, "="))
print()
input("Enter zum Beenden")
| 37.455556 | 120 | 0.571344 |
b00f2460e890e6f4a142d1daf4e25740ab790667
| 1,200 |
py
|
Python
|
Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maxRegion function below.
def in_graph(grid, i, j):
n = len(grid)
m = len(grid[0])
return i >= 0 and j >= 0 and i < n and j < m
def dfs(grid, visited, i, j):
visited.add((i, j))
ans = 1
neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)]
for (x, y) in neighbors:
if in_graph(grid, x, y) and (x, y) not in visited and grid[x][y] == 1:
ans += dfs(grid, visited, x, y)
return ans
def max_region(grid):
visited = set()
n = len(grid)
m = len(grid[0])
max_value = 0
for i in range(n):
for j in range(m):
if grid[i][j] == 1 and (i, j) not in visited:
ans = dfs(grid, visited, i, j)
max_value = max(max_value, ans)
return max_value
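# Worked example: for the grid [[1, 1], [0, 1]] the three 1-cells form a
# single region (diagonal neighbours count), so max_region returns 3.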
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
m = int(input())
grid = []
for _ in range(n):
grid.append(list(map(int, input().rstrip().split())))
res = max_region(grid)
fptr.write(str(res) + '\n')
fptr.close()
| 21.052632 | 104 | 0.5225 |
b020e29b5069b5c83b3331ecac5504bcdfad3e5a
| 465 |
py
|
Python
|
expect.py
|
chendong2016/chendong2016.github.io
|
d120b4aedd8739791432117665bf3c927db183d1
|
[
"MIT"
] | null | null | null |
expect.py
|
chendong2016/chendong2016.github.io
|
d120b4aedd8739791432117665bf3c927db183d1
|
[
"MIT"
] | null | null | null |
expect.py
|
chendong2016/chendong2016.github.io
|
d120b4aedd8739791432117665bf3c927db183d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import pexpect
import os
import sys
def git_expect(repodir, u, p):
os.chdir(repodir)
os.system('git pull')
os.system('git add .')
os.system('git commit -m update')
foo = pexpect.spawn('git push')
foo.expect('.*Username.*:')
foo.sendline(u)
foo.expect('.*ssword:*')
foo.sendline(p)
    print(foo.read())
def main(argv):
git_expect(argv[1], argv[2], argv[3])
if __name__ == '__main__':
main(sys.argv)
| 18.6 | 41 | 0.615054 |
af00604e8cec5ece69bda0fcc4b6b77604e5f984
| 2,070 |
py
|
Python
|
Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')
def _get_transform():
return transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
return torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
def get_test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform)
return torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
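if __name__ == '__main__':
    # A small usage sketch: fetch one batch and show it as an image grid
    # (the first run downloads CIFAR-100 into ./data).
    loader = get_train_data_loader()
    images, labels = next(iter(loader))
    print(' '.join(classes[label] for label in labels))
    imshow(torchvision.utils.make_grid(images))
    plt.show()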
| 57.5 | 936 | 0.637681 |
597f409d6d7b0943097111bbce6c5d890bf7b9bd
| 2,452 |
py
|
Python
|
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/creational/prototype.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/creational/prototype.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/creational/prototype.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
"""
*What is this pattern about?
This pattern aims to reduce the number of classes required by an
application. Instead of relying on subclasses it creates objects by
copying a prototypical instance at run-time.
This is useful as it makes it easier to derive new kinds of objects,
when instances of the class have only a few different combinations of
state, and when instantiation is expensive.
*What does this example do?
When the number of prototypes in an application can vary, it can be
useful to keep a Dispatcher (aka, Registry or Manager). This allows
clients to query the Dispatcher for a prototype before cloning a new
instance.
Below is an example of such a Dispatcher, which contains three
copies of the prototype: 'default', 'objecta' and 'objectb'.
*TL;DR
Creates new object instances by cloning prototype.
"""
from typing import Any, Dict
class Prototype:
def __init__(self, value: str = "default", **attrs: Any) -> None:
self.value = value
self.__dict__.update(attrs)
    def clone(self, **attrs: Any) -> "Prototype":
"""Clone a prototype and update inner attributes dictionary"""
# Python in Practice, Mark Summerfield
# copy.deepcopy can be used instead of next line.
obj = self.__class__(**self.__dict__)
obj.__dict__.update(attrs)
return obj
class PrototypeDispatcher:
def __init__(self):
self._objects = {}
def get_objects(self) -> Dict[str, Prototype]:
"""Get all objects"""
return self._objects
def register_object(self, name: str, obj: Prototype) -> None:
"""Register an object"""
self._objects[name] = obj
def unregister_object(self, name: str) -> None:
"""Unregister an object"""
del self._objects[name]
def main() -> None:
"""
>>> dispatcher = PrototypeDispatcher()
>>> prototype = Prototype()
>>> d = prototype.clone()
>>> a = prototype.clone(value='a-value', category='a')
>>> b = a.clone(value='b-value', is_checked=True)
>>> dispatcher.register_object('objecta', a)
>>> dispatcher.register_object('objectb', b)
>>> dispatcher.register_object('default', d)
>>> [{n: p.value} for n, p in dispatcher.get_objects().items()]
[{'objecta': 'a-value'}, {'objectb': 'b-value'}, {'default': 'default'}]
>>> print(b.category, b.is_checked)
a True
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
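# A dispatcher-free sketch of the same idea: derive variants by cloning a
# configured instance instead of defining new subclasses (the attribute
# names below are purely illustrative):
#
#     base = Prototype(value='base', colour='red')
#     variant = base.clone(colour='blue')  # same class, tweaked state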
| 29.902439 | 76 | 0.667618 |
0501d3d1de8e2cc2be8c008390642f57530559f7
| 221 |
py
|
Python
|
euler-29.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
euler-29.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
euler-29.py
|
TFabijo/euler
|
58dc07b9adb236890556ccd5d75ca9dbd2b50df9
|
[
"MIT"
] | null | null | null |
def različne_potence(a_max,b_max):
stevila = set()
for a in range(2,a_max+1):
for b in range(2,b_max+1):
stevila.add(a**b)
return len(stevila)
print(različne_potence(100, 100))  # 9183 distinct terms
| 22.1 | 35 | 0.565611 |
f951e56906f6675bf3d54a788c7aaed5a54a2c48
| 4,626 |
py
|
Python
|
yolov5-coreml-tflite-converter/utils/constants.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
yolov5-coreml-tflite-converter/utils/constants.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
yolov5-coreml-tflite-converter/utils/constants.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
import os
# -------------------------------------------------------------------------------------------------------------------- #
# Constants
# -------------------------------------------------------------------------------------------------------------------- #
# General
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DATA_DIR = os.path.join('data')
OUTPUT_DIR = os.path.join(DATA_DIR, 'output')
FLOAT32 = 'float32'
FLOAT16 = 'float16'
INT8 = 'int8'
FULLINT8 = 'fullint8'
FLOAT32_SUFFIX = '_float32'
FLOAT16_SUFFIX = '_float16'
INT8_SUFFIX = '_int8'
FULLINT8_SUFFIX = '_fullint8'
BATCH_SIZE = 1
NB_CHANNEL = 3
# x, y, w, h, score, class1, class2, ...
XY_SLICE = (0, 2)
WH_SLICE = (2, 4)
SCORE_SLICE = (4, 5)
CLASSES_SLICE = (5, 0)
NB_OUTPUTS = 5 # 1 objectness score + 4 bounding box coordinates
NORMALIZATION_FACTOR = 255.
# Input names
IMAGE_NAME = 'image'
NORMALIZED_SUFFIX = '_normalized'
QUANTIZED_SUFFIX = '_quantized'
IOU_NAME = 'iou threshold'
CONF_NAME = 'conf threshold'
# Colors
BLUE = '\033[36m'
GREEN = '\033[32m'
RED = '\033[31m'
YELLOW = '\033[33m'
PURPLE = '\033[34m'
END_COLOR = '\033[0m'
BOLD = '\033[1m'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# CoreML converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
COREML_SUFFIX = '.mlmodel'
TORCHSCRIPT_SUFFIX = '.torchscript.pt'
# Outputs names
CONFIDENCE_NAME = 'confidence' # list of class scores
COORDINATES_NAME = 'coordinates' # (x, y, w, h)
RAW_PREFIX = 'raw_'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# TFLite converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
TFLITE_SUFFIX = '.tflite'
LABELS_NAME = 'labels.txt'
# Format
TFLITE = 'tflite'
SAVED_MODEL = 'saved_model'
GRAPH_DEF_SUFFIX = '.pb'
# NMS
PADDED = 'padded'
SIMPLE = 'simple'
COMBINED = 'combined'
# Representative dataset
BAHNHOF = 'bahnhof'
WAGEN = 'wagen'
TRAKTION = 'traktion'
# Output names
BOUNDINGBOX_NAME = 'location' # (y1, x1, y2, x2)
CLASSES_NAME = 'category' # class index
SCORES_NAME = 'score' # confidence score
NUMBER_NAME = 'number of detections' # number of detected object in the image
DETECTIONS_NAME = 'detection results'
PREDICTIONS_NAME = 'predictions'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# ONNX converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
ONNX_SUFFIX = '.onnx'
OPSET = 12
# -------------------------------------------------------------------------------------------------------------------- #
# Default values
# -------------------------------------------------------------------------------------------------------------------- #
DEFAULT_COREML_NAME = 'yolov5-coreML'
DEFAULT_TFLITE_NAME = 'yolov5-TFLite'
DEFAULT_ONNX_NAME = 'yolov5-ONNX'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Common values
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_MODEL_OUTPUT_DIR = os.path.join(OUTPUT_DIR, 'converted_models')
DEFAULT_PT_MODEL = os.path.join('data', 'models', 'best.pt')
DEFAULT_INPUT_RESOLUTION = 640
DEFAULT_QUANTIZATION_TYPE = FLOAT32
DEFAULT_IOU_THRESHOLD = 0.45
DEFAULT_CONF_THRESHOLD = 0.25
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# TFlite additional default values
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_SOURCE_DATASET = WAGEN
DEFAULT_NB_CALIBRATION = 500
DEFAULT_MAX_NUMBER_DETECTION = 20
def get_zipfile_path(source):
return os.path.join(DATA_DIR, f'{source}_500.zip')
def get_dataset_url(source):
return f'https://sbb-ml-public-resources-prod.s3.eu-central-1.amazonaws.com/quantization/{source}_500.zip'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Inference
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_DETECTED_IMAGE_DIR = os.path.join(OUTPUT_DIR, 'detections')
| 35.584615 | 120 | 0.409641 |
fb11da9cad0274eb51c00948a846ff631a019147
| 3,437 |
py
|
Python
|
Machine Learning/TensorflowExamples/simple_gradient_descent.py
|
sarojjethva/Learning-Resources
|
17da7dd5e39c28fb1e363b9f0643d624ab047274
|
[
"MIT"
] | 639 |
2016-03-17T10:54:05.000Z
|
2021-07-22T10:17:36.000Z
|
Machine Learning/TensorflowExamples/simple_gradient_descent.py
|
sarojjethva/Learning-Resources
|
17da7dd5e39c28fb1e363b9f0643d624ab047274
|
[
"MIT"
] | 210 |
2016-03-19T15:18:28.000Z
|
2020-10-01T06:36:47.000Z
|
Machine Learning/TensorflowExamples/simple_gradient_descent.py
|
sarojjethva/Learning-Resources
|
17da7dd5e39c28fb1e363b9f0643d624ab047274
|
[
"MIT"
] | 376 |
2016-03-17T10:54:09.000Z
|
2021-08-19T18:22:29.000Z
|
"""
Author: Yash Mewada
Github: github.com/yashbmewada
Program for demonstrating simple line fitting using Tensorflow and Gradient Descent Algorithm
This program trains the model to fit two values, slope(m) and x-intercept(b) in the equation
of line y=mx+b. Here we would provide very small dataset of randomly generated pointset xs and ys
and train the tensorflow model to adjust the values of m and b in order to fit a straight line.
This straight line can further be used to predict any unknown value Y for a given unknown X based on the
learned value of m and b.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # called in order to minimize the warnings about SSE4.1 instructions.
import tensorflow as tf
"""
Random points of X and Y form the training data. aka Dataset (only training. no validation or test)
"""
xs = [0.00,2.00,4.00,6.00,8.00,10.00,12.00,14.00] #features
ys = [-0.82,-0.90,-0.12,0.26,0.31,0.64,1.02,1.00] #labels (actual outputs)
"""
Initial values for m and b. These values would be adjusted to fit the above dataset point
"""
m_initial = -0.50
b_initial = 1.00
"""
tf.Variable : allows us to create variables whose values can be adjusted in order to learn at each pass on the dataset.
"""
m = tf.Variable(m_initial)
b = tf.Variable(b_initial)
"""
In order to adjust and fit the line, we try to minimize the "error" between the predicted y
(computed from m*x + b) and the actual y (from "ys"); as this total error shrinks, the line
fits the points more closely.
"""
error = 0.0
"""
We write an operation that accumulates the error while iterating over the values of X and Y from the dataset [xs, ys].
Running this around 1000 times lets us minimize the error to a respectable fit for the line.
"""
for x,y in zip(xs,ys):
predicted_y = m*x + b
    error += (y-predicted_y)**2 # squared difference between actual and predicted y, accumulated into the total "cost" we minimize
"""
Now, in order to train over the operations defined above, we use tensorflow's Gradient Descent
Optimizer and pass the "error" to its minimize() function as a parameter.
While initializing the Gradient Descent optimizer, we define a learning_rate = 0.001.
This learning rate defines the magnitude, or "how big" a jump, we want to make while minimizing
the "cost" / "error". Remember: too small a learning rate makes training very slow, and too big
a learning rate can keep training from ever finding an optimum. The best learning rate can be
found by trying different values; here we take 0.001, as it usually works in most cases.
"""
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(error)
"""
Tensorflow uses a "session" to run the above mentioned training steps.
So before starting the session it is always advisable to initialize the variables.
"""
init_op = tf.global_variables_initializer()
"""
All the calculations would now be done in a Session
"""
with tf.Session() as session:
session.run(init_op)
_ITERATIONS = 1000 #number of passes on the dataset
for iteration in range(_ITERATIONS):
session.run(optimizer_op) #calling our optimization operator to minimize error
slope, intercept = session.run((m,b)) #calling our adjusted values
print('slope: ', slope , 'Intercept: ', intercept)
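"""
As a sanity check outside TensorFlow, the same line can be fitted in closed form
with ordinary least squares; numpy's polyfit returns the slope and intercept that
the training loop above converges towards.
"""
import numpy as np
ls_slope, ls_intercept = np.polyfit(xs, ys, 1)
print('least-squares slope: ', ls_slope, 'Intercept: ', ls_intercept)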
| 40.916667 | 132 | 0.748036 |
34c56272a08186e39af3f77a180a6a2272eff2cb
| 6,609 |
py
|
Python
|
Securinets/2021/Quals/web/Warmup/app.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Securinets/2021/Quals/web/Warmup/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Securinets/2021/Quals/web/Warmup/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
from itsdangerous import Signer, base64_encode, base64_decode
from flask import Flask, request, render_template, make_response, g, Response
from flask.views import MethodView
import urlparse
import shutil
import utils
import os
import mimetypes
app = Flask(__name__.split('.')[0])
app.config.from_object(__name__)
BUFFER_SIZE = 128000
URI_BEGINNING_PATH = {
'authorization': '/login/',
'weeb': '/weeb/wtf/',
}
def generate_key():
app.secret_key = os.urandom(24)
def generate_cookie_info(origin=None):
if not origin:
origin = request.headers.get('Origin')
useragent = request.headers.get('User-Agent')
return '%s %s' % (str(origin), str(useragent))
def verify_cookie(cookey):
is_correct = False
cookie_value = request.cookies.get(cookey)
if cookie_value:
s = Signer(app.secret_key)
expected_cookie_content = \
generate_cookie_info(base64_decode(cookey))
expected_cookie_content = s.get_signature(expected_cookie_content)
if expected_cookie_content == cookie_value:
is_correct = True
return is_correct
def is_authorized():
origin = request.headers.get('Origin')
if origin is None:
return True
return verify_cookie(base64_encode(origin))
@app.before_request
def before_request():
headers = {}
headers['Access-Control-Max-Age'] = '3600'
headers['Access-Control-Allow-Credentials'] = 'true'
headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Accept-Encoding, Content-Length, ' + \
'Content-Type, Authorization, Depth, If-Modified-Since, '+ \
'If-None-Match'
headers['Access-Control-Expose-Headers'] = \
'Content-Type, Last-Modified, WWW-Authenticate'
origin = request.headers.get('Origin')
headers['Access-Control-Allow-Origin'] = origin
specific_header = request.headers.get('Access-Control-Request-Headers')
if is_authorized():
status_code = 200
elif request.method == 'OPTIONS' and specific_header:
headers['Access-Control-Request-Headers'] = specific_header
headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'])
response = make_response('', 200, headers)
return response
else:
s = Signer(app.secret_key)
headers['WWW-Authenticate'] = 'Nayookie login_url=' + \
urlparse.urljoin(request.url_root,
URI_BEGINNING_PATH['authorization']) + '?sig=' + \
s.get_signature(origin) + '{&back_url,origin}'
response = make_response('', 401, headers)
return response
g.status = status_code
g.headers = headers
class weeb(MethodView):
methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']
def __init__(self):
self.baseuri = URI_BEGINNING_PATH['weeb']
def get_body(self):
request_data = request.data
try:
length = int(request.headers.get('Content-length'))
        except (TypeError, ValueError):
length = 0
if not request_data and length:
try:
request_data = request.form.items()[0][0]
except IndexError:
request_data = None
return request_data
def get(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def put(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def propfind(self, pathname):
status = g.status
headers = g.headers
pf = utils.PropfindProcessor(
URI_BEGINNING_PATH['weeb'] + pathname,
app.fs_handler,
request.headers.get('Depth', 'infinity'),
self.get_body())
try:
response = make_response(pf.create_response() + '\n', status, headers)
        except IOError:
response = make_response('Not found', 404, headers)
return response
def delete(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def copy(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def move(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def options(self, pathname):
return make_response('', g.status, g.headers)
weeb_view = weeb.as_view('dav')
app.add_url_rule(
'/weeb/wtf/',
defaults={'pathname': ''},
view_func=weeb_view
)
app.add_url_rule(
URI_BEGINNING_PATH['weeb'] + '<path:pathname>',
view_func=weeb_view
)
@app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST'])
def authorize():
origin = request.args.get('origin')
if request.method == 'POST':
response = make_response()
if request.form.get('continue') != 'true':
generate_key()
s = Signer(app.secret_key)
if s.get_signature(origin) == request.args.get('sig'):
key = base64_encode(str(origin))
back = request.args.get('back_url')
info = generate_cookie_info(origin=origin)
response.set_cookie(key, value=s.get_signature(info), max_age=None,
expires=None, path='/', domain=None, secure=True, httponly=True)
else:
return 'Something went wrong...'
        response.status = '301'
response.headers['Location'] = '/' if not back else back
else:
response = make_response(render_template('authorization_page.html',
cookie_list=[ base64_decode(cookey)
for cookey in
request.cookies.keys()
if verify_cookie(cookey) ],
origin=request.args.get('origin'),
back_url=request.args.get('back_url')))
return response
if __name__ == '__main__':
app.fs_path = '/app/'
app.fs_handler = utils.FilesystemHandler(app.fs_path,
URI_BEGINNING_PATH['weeb'])
generate_key()
app.run(host="0.0.0.0")
| 28.734783 | 127 | 0.589045 |
1f5ad0ea434b6b0ee51b3be2fff5dab3afd07d73
| 1,693 |
py
|
Python
|
src/test/tests/databases/xform_precision.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/databases/xform_precision.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/databases/xform_precision.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: xform_precision.py
#
# Tests: Transform manager's conversion to float
#
# Programmer: Mark C. Miller
# Date: September 24, 2006
#
# Modifications:
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# Turn off force single precision for this test
#
readOptions=GetDefaultFileOpenOptions("Silo")
readOptions["Force Single"] = 0
SetDefaultFileOpenOptions("Silo", readOptions)
#
# Test ordinary float data (no conversion) first
#
AddPlot("Mesh","mesh")
DrawPlots()
Test("float_xform_01")
DeleteAllPlots()
#
# Ok, now read a mesh with double coords
#
AddPlot("Mesh","meshD")
DrawPlots()
Test("float_xform_02")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test float data on a float mesh
#
AddPlot("Pseudocolor","sphElev_on_mesh")
DrawPlots()
Test("float_xform_03")
DeleteAllPlots()
#
# test float data on a double mesh
#
AddPlot("Pseudocolor","sphElev_on_meshD")
DrawPlots()
Test("float_xform_04")
DeleteAllPlots()
#
# test double data on a float mesh
#
AddPlot("Pseudocolor","sphElevD_on_mesh")
DrawPlots()
Test("float_xform_05")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test double data on a double mesh
#
AddPlot("Pseudocolor","sphElevD_on_meshD")
DrawPlots()
Test("float_xform_06")
DeleteAllPlots()
Exit()
| 19.686047 | 78 | 0.670998 |
2f6244b35b7a2075be7229011d561a8fcff2ef5e
| 356 |
py
|
Python
|
python/decorator/vanishing_ret_fixed.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/decorator/vanishing_ret_fixed.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/decorator/vanishing_ret_fixed.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
def debug_transformer(func):
def wrapper():
print(f'Function `{func.__name__}` called')
ret = func()
print(f'Function `{func.__name__}` finished')
return ret
return wrapper
@debug_transformer
def walkout():
print('Bye Felical')
@debug_transformer
def get_bob():
return 'Bob'
bob = get_bob()
print(bob)
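# Running this module prints, in order:
#   Function `get_bob` called
#   Function `get_bob` finished
#   Bob
# i.e. the wrapper now propagates the wrapped function's return value
# instead of swallowing it.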
| 16.181818 | 53 | 0.632022 |
8d0adc4ed9ef5199fe4fc22c9e4224a584dd7c04
| 1,177 |
py
|
Python
|
tests/test_myst_plugins.py
|
noirbizarre/pelican-myst
|
c2c7b44803ebc33e70d915c35b692df14597469b
|
[
"MIT"
] | null | null | null |
tests/test_myst_plugins.py
|
noirbizarre/pelican-myst
|
c2c7b44803ebc33e70d915c35b692df14597469b
|
[
"MIT"
] | 21 |
2021-12-21T16:47:35.000Z
|
2022-03-29T04:34:59.000Z
|
tests/test_myst_plugins.py
|
noirbizarre/pelican-myst
|
c2c7b44803ebc33e70d915c35b692df14597469b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Union
import pytest
from .helpers import read_content_metadata
# from pyquery import PyQuery as pq
TASKLIST_EXPECTATIONS: tuple[tuple[Union[dict, list], str], ...] = (
([], "disabled"),
({}, "disabled"),
(["tasklist"], "default"),
({"tasklist": {}}, "default"),
({"tasklist": dict(enabled=True)}, "enabled"),
({"tasklist": dict(label=True)}, "label"),
)
@pytest.mark.parametrize("setting,key", TASKLIST_EXPECTATIONS)
def test_myst_tasklist(setting, key):
content, meta = read_content_metadata("myst/tasklist.md", MYST_PLUGINS=setting)
assert content == meta["expected"][key]
# def test_myst_admonitions():
# content, meta = read_content_metadata("myst/admonitions.md", MYST_PLUGINS=["admonitions"])
# print(content)
# html = pq(content)
# admonitions = html.find("div.admonition")
# assert admonitions.length == 8
# assert admonitions.find("p.admonition-title").length == 8
# assert html.find("div.admonition.note").length == 4
# assert html.find("div.admonition.important").length == 2
# assert html.find("div.admonition.warning").length == 1
| 30.179487 | 96 | 0.677995 |