Dataset schema (⌀ = column may be null):
hexsha: string (40-40) | size: int64 (6-782k) | ext: 7 classes | lang: 1 class
max_stars_repo_path: string (4-237) | max_stars_repo_name: string (6-72) | max_stars_repo_head_hexsha: string (40-40) | max_stars_repo_licenses: list | max_stars_count: int64 (1-53k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24-24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24-24) ⌀
max_issues_repo_path: string (4-184) | max_issues_repo_name: string (6-72) | max_issues_repo_head_hexsha: string (40-40) | max_issues_repo_licenses: list | max_issues_count: int64 (1-27.1k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24-24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24-24) ⌀
max_forks_repo_path: string (4-184) | max_forks_repo_name: string (6-72) | max_forks_repo_head_hexsha: string (40-40) | max_forks_repo_licenses: list | max_forks_count: int64 (1-12.2k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24-24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24-24) ⌀
content: string (6-782k) | avg_line_length: float64 (2.75-664k) | max_line_length: int64 (5-782k) | alphanum_fraction: float64 (0-1)
hexsha: 51c9d30479808975f17a8356968ba3ffdf2e3a45 | size: 278 | ext: py | lang: Python
max_stars:  src/onegov/form/filters.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
max_issues: src/onegov/form/filters.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
max_forks:  src/onegov/form/filters.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
content:
from onegov.core.utils import yubikey_public_id


def as_float(value):
    return value and float(value) or 0.0


def strip_whitespace(value):
    return value and value.strip(' \r\n') or None


def yubikey_identifier(value):
    return value and yubikey_public_id(value) or ''
avg_line_length: 19.857143 | max_line_length: 51 | alphanum_fraction: 0.741007

hexsha: 51fa66f7327624b0362e31f656c33d867492f9a3 | size: 2,922 | ext: py | lang: Python
max_stars:  Library/ContractUtils.py | rccannizzaro/QC-StrategyBacktest @ 847dbd61680466bc60ce7893eced8a8f70d16b2e | ["Apache-2.0"] | count: 11 | 2021-12-02T15:41:47.000Z .. 2022-03-14T03:49:22.000Z
max_issues: Library/ContractUtils.py | ikamanu/QC-StrategyBacktest @ 847dbd61680466bc60ce7893eced8a8f70d16b2e | ["Apache-2.0"] | count: null
max_forks:  Library/ContractUtils.py | ikamanu/QC-StrategyBacktest @ 847dbd61680466bc60ce7893eced8a8f70d16b2e | ["Apache-2.0"] | count: 5 | 2022-02-02T12:07:51.000Z .. 2022-02-13T02:24:19.000Z
content:
########################################################################################
#                                                                                      #
# Licensed under the Apache License, Version 2.0 (the "License");                     #
# you may not use this file except in compliance with the License.                    #
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0  #
#                                                                                      #
# Unless required by applicable law or agreed to in writing, software                 #
# distributed under the License is distributed on an "AS IS" BASIS,                   #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.            #
# See the License for the specific language governing permissions and                 #
# limitations under the License.                                                      #
#                                                                                      #
########################################################################################

from Logger import *


class ContractUtils:
    def __init__(self, context):
        # Set the context
        self.context = context
        # Set the logger
        self.logger = Logger(context, className=type(self).__name__, logLevel=context.logLevel)

    def getUnderlyingLastPrice(self, contract):
        # Get the context
        context = self.context
        # Get the object from the Securities dictionary if available (pull the latest price),
        # else use the contract object itself
        if contract.UnderlyingSymbol in context.Securities:
            security = context.Securities[contract.UnderlyingSymbol]
            # Check if we have found the security
            if security is not None:
                # Get the last known price of the security
                return context.GetLastKnownPrice(security).Price
        # Fall back to the UnderlyingLastPrice attribute of the contract
        return contract.UnderlyingLastPrice

    def getSecurity(self, contract):
        # Get the Securities object
        Securities = self.context.Securities
        # Check if we can extract the Symbol attribute
        if hasattr(contract, "Symbol") and contract.Symbol in Securities:
            # Get the security from the Securities dictionary if available (pull the latest price)
            security = Securities[contract.Symbol]
        else:
            # Use the contract itself
            security = contract
        return security

    # Returns the mid-price of an option contract
    def midPrice(self, contract):
        security = self.getSecurity(contract)
        return 0.5 * (security.BidPrice + security.AskPrice)

    def bidAskSpread(self, contract):
        security = self.getSecurity(contract)
        return abs(security.AskPrice - security.BidPrice)
avg_line_length: 48.7 | max_line_length: 132 | alphanum_fraction: 0.558522

hexsha: cfbeba738a545b8fa816e6c53371f15e1212b0d3 | size: 2,590 | ext: py | lang: Python
max_stars:  sds.py | Nico0302/SMG-UntisOfficeConverters @ b43fd6b2baa58d42f9343c3a40963b7eec689ed5 | ["MIT"] | count: null
max_issues: sds.py | Nico0302/SMG-UntisOfficeConverters @ b43fd6b2baa58d42f9343c3a40963b7eec689ed5 | ["MIT"] | count: null
max_forks:  sds.py | Nico0302/SMG-UntisOfficeConverters @ b43fd6b2baa58d42f9343c3a40963b7eec689ed5 | ["MIT"] | count: null
content:
import csv

from utils import to_sds_date

SCHOOL_ID = '1'

SCHOOL_FILENAME = 'School.csv'
SECTION_FILENAME = 'Section.csv'
STUDENT_FILENAME = 'Student.csv'
TEACHER_FILENAME = 'Teacher.csv'
STUDENT_ENROLLMENT_FILENAME = 'StudentEnrollment.csv'
TEACHER_ROSTER_FILENAME = 'TeacherRoster.csv'


class Writer:
    HEADERS = []

    def __init__(self, csvfile, datasource):
        self.csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
        self.datasource = datasource

    def row_iterator(self, sourcerow, *args, **kwargs):
        # Base implementation passes the source row through unchanged;
        # subclasses may accept extra arguments (e.g. schoolyear).
        return sourcerow

    def generate(self, *args, **kwargs):
        self.csvwriter.writerow(self.HEADERS)
        for sourcerow in self.datasource:
            self.csvwriter.writerow(self.row_iterator(sourcerow, *args, **kwargs))


class School(Writer):
    HEADERS = ['SIS ID', 'Name']

    def __init__(self, csvfile, schoolname):
        super(School, self).__init__(csvfile, [[SCHOOL_ID, schoolname]])


class Section(Writer):
    HEADERS = ['SIS ID', 'School SIS ID', 'Section Name']

    def __init__(self, csvfile, datasource):
        super(Section, self).__init__(csvfile, datasource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        return [
            sourcerow.get_id(schoolyear),
            SCHOOL_ID,
            sourcerow.course + ' ' + str(sourcerow.grade)
        ]


class Student(Writer):
    HEADERS = ['SIS ID', 'School SIS ID', 'Username', 'Grade']

    def __init__(self, csvfile, datasource):
        super(Student, self).__init__(csvfile, datasource)

    def row_iterator(self, sourcerow):
        return [sourcerow.shortname, SCHOOL_ID, sourcerow.get_username(), sourcerow.grade]


class Teacher(Writer):
    HEADERS = ['SIS ID', 'School SIS ID', 'Username']

    def __init__(self, csvfile, datasource):
        super(Teacher, self).__init__(csvfile, datasource)

    def row_iterator(self, sourcerow):
        return [sourcerow.shortname, SCHOOL_ID, sourcerow.get_username()]


class StudentEnrollment(Writer):
    HEADERS = ['Section SIS ID', 'SIS ID']

    def __init__(self, csvfile, datasource):
        super(StudentEnrollment, self).__init__(csvfile, datasource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        return [sourcerow.get_id(schoolyear), sourcerow.student]


class TeacherRoster(Writer):
    HEADERS = ['Section SIS ID', 'SIS ID']

    def __init__(self, csvfile, datasource):
        super(TeacherRoster, self).__init__(csvfile, datasource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        return [sourcerow.get_id(schoolyear), sourcerow.teacher]
avg_line_length: 31.204819 | max_line_length: 90 | alphanum_fraction: 0.688417

hexsha: 5c61d523b5aab9332a583c4fbce5282a1d349b34 | size: 198 | ext: py | lang: Python
max_stars:  0-notes/job-search/Cracking the Coding Interview/C03StacksQueues/questions/3.2-question.py | eengineergz/Lambda @ 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null
max_issues: 0-notes/job-search/Cracking the Coding Interview/C03StacksQueues/questions/3.2-question.py | eengineergz/Lambda @ 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null
max_forks:  0-notes/job-search/Cracking the Coding Interview/C03StacksQueues/questions/3.2-question.py | eengineergz/Lambda @ 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null
content:
# 3.2 Stack Min
# How would you design a stack which, in addition to push and pop, has a function min which
# returns the minimum element? Push, pop, and min should all operate in O(1) time.
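
# A common O(1) answer, sketched below (not part of the original notes file):
# keep a second stack of running minimums next to the main stack, so push, pop
# and min each touch only the stack tops.
class MinStack:
    def __init__(self):
        self._items = []
        self._mins = []  # _mins[-1] always holds the minimum of _items

    def push(self, value):
        self._items.append(value)
        # push the smaller of the new value and the current minimum
        self._mins.append(value if not self._mins else min(value, self._mins[-1]))

    def pop(self):
        self._mins.pop()
        return self._items.pop()

    def min(self):
        return self._mins[-1]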
avg_line_length: 39.6 | max_line_length: 92 | alphanum_fraction: 0.712121

hexsha: 7a6651f10741e2f55d41c5d49f4a11f863eca070 | size: 8,311 | ext: py | lang: Python
max_stars:  venv/lib/python3.7/site-packages/twilio/rest/preview/trusted_comms/brands_information.py | uosorio/heroku_face @ 7d6465e71dba17a15d8edaef520adb2fcd09d91e | ["Apache-2.0"] | count: 1,362 | 2015-01-04T10:25:18.000Z .. 2022-03-24T10:07:08.000Z
max_issues: venv/lib/python3.7/site-packages/twilio/rest/preview/trusted_comms/brands_information.py | uosorio/heroku_face @ 7d6465e71dba17a15d8edaef520adb2fcd09d91e | ["Apache-2.0"] | count: 299 | 2015-01-30T09:52:39.000Z .. 2022-03-31T23:03:02.000Z
max_forks:  venv/lib/python3.7/site-packages/twilio/rest/preview/trusted_comms/brands_information.py | uosorio/heroku_face @ 7d6465e71dba17a15d8edaef520adb2fcd09d91e | ["Apache-2.0"] | count: 622 | 2015-01-03T04:43:09.000Z .. 2022-03-29T14:11:00.000Z
content:
# coding=utf-8
r"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
      /       /
"""

from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page


class BrandsInformationList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version):
        """
        Initialize the BrandsInformationList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationList
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationList
        """
        super(BrandsInformationList, self).__init__(version)

        # Path Solution
        self._solution = {}

    def get(self):
        """
        Constructs a BrandsInformationContext

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        return BrandsInformationContext(self._version, )

    def __call__(self):
        """
        Constructs a BrandsInformationContext

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        return BrandsInformationContext(self._version, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.BrandsInformationList>'


class BrandsInformationPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, response, solution):
        """
        Initialize the BrandsInformationPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationPage
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationPage
        """
        super(BrandsInformationPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of BrandsInformationInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        return BrandsInformationInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.BrandsInformationPage>'


class BrandsInformationContext(InstanceContext):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version):
        """
        Initialize the BrandsInformationContext

        :param Version version: Version that contains the resource

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        super(BrandsInformationContext, self).__init__(version)

        # Path Solution
        self._solution = {}
        self._uri = '/BrandsInformation'.format(**self._solution)

    def fetch(self, if_none_match=values.unset):
        """
        Fetch the BrandsInformationInstance

        :param unicode if_none_match: Standard `If-None-Match` HTTP header

        :returns: The fetched BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        headers = values.of({'If-None-Match': if_none_match, })

        payload = self._version.fetch(method='GET', uri=self._uri, headers=headers, )

        return BrandsInformationInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.BrandsInformationContext {}>'.format(context)


class BrandsInformationInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, payload):
        """
        Initialize the BrandsInformationInstance

        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        super(BrandsInformationInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'update_time': deserialize.iso8601_datetime(payload.get('update_time')),
            'file_link': payload.get('file_link'),
            'file_link_ttl_in_seconds': payload.get('file_link_ttl_in_seconds'),
            'url': payload.get('url'),
        }

        # Context
        self._context = None
        self._solution = {}

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: BrandsInformationContext for this BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        if self._context is None:
            self._context = BrandsInformationContext(self._version, )
        return self._context

    @property
    def update_time(self):
        """
        :returns: Creation time of the information retrieved
        :rtype: datetime
        """
        return self._properties['update_time']

    @property
    def file_link(self):
        """
        :returns: The URL to the brands information
        :rtype: unicode
        """
        return self._properties['file_link']

    @property
    def file_link_ttl_in_seconds(self):
        """
        :returns: How long will the `file_link` be valid
        :rtype: unicode
        """
        return self._properties['file_link_ttl_in_seconds']

    @property
    def url(self):
        """
        :returns: The URL of this resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self, if_none_match=values.unset):
        """
        Fetch the BrandsInformationInstance

        :param unicode if_none_match: Standard `If-None-Match` HTTP header

        :returns: The fetched BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        return self._proxy.fetch(if_none_match=if_none_match, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.BrandsInformationInstance {}>'.format(context)
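
# A hedged usage sketch (not part of the generated file): the docstrings above
# place this resource under the preview domain, so with a configured twilio
# Client it should be reachable as below; `account_sid` and `auth_token` are
# assumed placeholders.
#
# from twilio.rest import Client
#
# client = Client(account_sid, auth_token)
# brands_information = client.preview.trusted_comms.brands_information().fetch()
# print(brands_information.file_link)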
avg_line_length: 34.342975 | max_line_length: 96 | alphanum_fraction: 0.679461

hexsha: 8f8b794a3c57821e35ab3c5efd51b3bc2789f860 | size: 2,671 | ext: py | lang: Python
max_stars:  plugins/tff_backend/models/nodes.py | threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | count: null
max_issues: plugins/tff_backend/models/nodes.py | threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | count: 178 | 2017-08-02T12:58:06.000Z .. 2017-12-20T15:01:12.000Z
max_forks:  plugins/tff_backend/models/nodes.py | threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | count: 2 | 2018-01-10T10:43:12.000Z .. 2018-03-18T10:42:23.000Z
content:
# -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@

from google.appengine.ext import ndb

from framework.models.common import NdbModel
from plugins.rogerthat_api.plugin_utils import Enum
from plugins.tff_backend.plugin_consts import NAMESPACE


class NodeStatus(Enum):
    HALTED = 'halted'
    RUNNING = 'running'


class WalletStatus(Enum):
    ERROR = 'error'
    LOCKED = 'locked'
    UNLOCKED = 'unlocked'


class NodeChainStatus(NdbModel):
    wallet_status = ndb.StringProperty(choices=WalletStatus.all())
    block_height = ndb.IntegerProperty(default=0)
    active_blockstakes = ndb.IntegerProperty(default=0)
    network = ndb.StringProperty(default='standard', choices=['devnet', 'testnet', 'standard'])
    confirmed_balance = ndb.IntegerProperty(default=0)
    connected_peers = ndb.IntegerProperty(default=0)
    address = ndb.StringProperty()


class Node(NdbModel):
    NAMESPACE = NAMESPACE
    serial_number = ndb.StringProperty()
    last_update = ndb.DateTimeProperty()
    username = ndb.StringProperty()
    status = ndb.StringProperty(default=NodeStatus.HALTED)
    status_date = ndb.DateTimeProperty()
    info = ndb.JsonProperty()
    chain_status = ndb.StructuredProperty(NodeChainStatus)

    @property
    def id(self):
        return self.key.string_id().decode('utf-8')

    @classmethod
    def create_key(cls, node_id):
        # type: (unicode) -> ndb.Key
        return ndb.Key(cls, node_id, namespace=NAMESPACE)

    @classmethod
    def list_by_user(cls, username):
        return cls.query().filter(cls.username == username)

    @classmethod
    def list_by_property(cls, property_name, ascending):
        prop = None
        if '.' in property_name:
            for part in property_name.split('.'):
                prop = getattr(prop if prop else cls, part)
        else:
            prop = getattr(cls, property_name)
        return cls.query().order(prop if ascending else -prop)

    @classmethod
    def list_running_by_last_update(cls, date):
        return cls.query().filter(cls.last_update < date).filter(cls.status == NodeStatus.RUNNING)
avg_line_length: 32.975309 | max_line_length: 98 | alphanum_fraction: 0.706477

hexsha: 8f71414f26283f636acf131540cc80063b73c4d4 | size: 691 | ext: py | lang: Python
max_stars:  components/amp-utility/python/Snd.py | ekmixon/AliOS-Things @ 00334295af8aa474d818724149726ca93da4645d | ["Apache-2.0"] | count: 4,538 | 2017-10-20T05:19:03.000Z .. 2022-03-30T02:29:30.000Z
max_issues: components/amp-utility/python/Snd.py | ekmixon/AliOS-Things @ 00334295af8aa474d818724149726ca93da4645d | ["Apache-2.0"] | count: 1,088 | 2017-10-21T07:57:22.000Z .. 2022-03-31T08:15:49.000Z
max_forks:  components/amp-utility/python/Snd.py | willianchanlovegithub/AliOS-Things @ 637c0802cab667b872d3b97a121e18c66f256eab | ["Apache-2.0"] | count: 1,860 | 2017-10-20T05:22:35.000Z .. 2022-03-27T10:54:14.000Z
content:
# -*- coding: UTF-8 -*-
"""
Each of the interfaces here only needs to be called once; the individual
interfaces and their parameters are described below.
=================================================================================================
"""


def install_codec_driver():
    """
    Install the sound card (codec) driver; only needs to be called once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass


def uninstall_codec_driver():
    """
    Uninstall the sound card driver; only needs to be called once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass


def init():
    """
    Initialize the uVoice component; only needs to be called once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass


def deinit():
    """
    De-initialize the uVoice component; only needs to be called once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass
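
# A minimal usage sketch (not part of the original module): it assumes the file
# is importable as `Snd` on an AliOS-Things device and that the return codes
# follow the docstrings above (0 on success), with install before init and
# deinit before uninstall.
#
# import Snd
#
# if Snd.install_codec_driver() == 0 and Snd.init() == 0:
#     ...  # audio work via the uVoice component goes here
#     Snd.deinit()
#     Snd.uninstall_codec_driver()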
avg_line_length: 14.102041 | max_line_length: 97 | alphanum_fraction: 0.489146

hexsha: 56e55db3074073781e32a309aaad46301011098d | size: 2,768 | ext: py | lang: Python
max_stars:  Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content @ 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | ["MIT"] | count: null
max_issues: Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content @ 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | ["MIT"] | count: 40 | 2022-03-03T07:34:00.000Z .. 2022-03-31T07:38:35.000Z
max_forks:  Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content @ 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | ["MIT"] | count: null
content:
from OktaEventCollector import ReqParams, Client, Request, GetEvents, Method
import pytest

req_params = ReqParams(since='', sortOrder='ASCENDING', limit='5')
request = Request(method=Method.GET, url='https://testurl.com', headers={}, params=req_params)
client = Client(request)
get_events = GetEvents(client)

id1 = {'uuid': 'a5b57ec5febb'}
id2 = {'uuid': 'a5b57ec5fecc'}
id3 = {'uuid': 'a12f3c5d77f3'}
id4 = {'uuid': 'a12f3c5dxxxx'}


class MockResponse:
    def __init__(self, data):
        self.data = data

    def json(self):
        return self.data


@pytest.mark.parametrize("events,ids,result", [
    ([id1, id2, id3], ['a12f3c5d77f3'], [id1, id2]),
    ([id1, id2, id3], ['a12f3c5dxxxx'], [id1, id2, id3]),
    ([], ['a12f3c5d77f3'], []),
    ([{'uuid': 0}, {'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 4}, {'uuid': 5}, {'uuid': 6}, {'uuid': 7},
      {'uuid': 8}, {'uuid': 9}], [0, 4, 7, 9],
     [{'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 5}, {'uuid': 6}, {'uuid': 8}])])
def test_remove_duplicates(events, ids, result):
    assert get_events.remove_duplicates(events, ids) == result


@pytest.mark.parametrize("events,result", [
    ([{'published': '2022-04-17T12:31:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
      {'published': '2022-04-17T12:33:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}],
     {'after': '2022-04-17T12:33:36.667000', 'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc']}),
    ([{'published': '2022-04-17T12:31:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}],
     {'after': '2022-04-17T12:32:36.667000',
      'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc',
              '1d0844b6-3148-11ec-9027-a5b57ec5fbbb']})])
def test_get_last_run(events, result):
    assert get_events.get_last_run(events) == result


@pytest.mark.parametrize("time", ['2022-04-17T12:32:36.667)'])
def test_set_since_value(time):
    req_params.set_since_value(time)
    assert req_params.since == time


def test_make_api_call(mocker):
    mock_res = MockResponse([{1}, {1}, {1}, {1}, {1}])
    mocker.patch.object(client, 'call', return_value=mock_res)
    assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}]
    mock_res.data = [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
    assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
avg_line_length: 42.584615 | max_line_length: 109 | alphanum_fraction: 0.58526

hexsha: 85495a86fbc5eda9a5807aaf00f10f29d51d67f3 | size: 5,297 | ext: py | lang: Python
max_stars:  SequenceModel/seq_model.py | BhaveshJP25/RSNA @ 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | ["MIT"] | count: null
max_issues: SequenceModel/seq_model.py | BhaveshJP25/RSNA @ 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | ["MIT"] | count: null
max_forks:  SequenceModel/seq_model.py | BhaveshJP25/RSNA @ 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | ["MIT"] | count: null
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class SequenceModel(nn.Module):
    def __init__(self, model_num, feature_dim, feature_num,
                 lstm_layers, hidden, drop_out, Add_position):
        super(SequenceModel, self).__init__()
        self.feature_num = feature_num

        # seq model 1
        self.fea_conv = nn.Sequential(
            nn.Dropout2d(drop_out),
            nn.Conv2d(feature_dim, 512, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Dropout2d(drop_out),
            nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout2d(drop_out),
        )
        self.fea_first_final = nn.Sequential(
            nn.Conv2d(128 * feature_num, 6, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True))

        # bidirectional GRU
        self.hidden_fea = hidden
        self.fea_lstm = nn.GRU(128 * feature_num, self.hidden_fea, num_layers=lstm_layers,
                               batch_first=True, bidirectional=True)
        self.fea_lstm_final = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=(1, self.hidden_fea * 2), stride=(1, 1), padding=(0, 0),
                      dilation=1, bias=True))

        ratio = 4
        if Add_position:
            model_num += 2
        else:
            model_num += 1

        # seq model 2
        self.conv_first = nn.Sequential(
            nn.Conv2d(model_num, 128 * ratio, kernel_size=(5, 1), stride=(1, 1), padding=(2, 0), dilation=1, bias=False),
            nn.BatchNorm2d(128 * ratio),
            nn.ReLU(),
            nn.Conv2d(128 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
            nn.BatchNorm2d(64 * ratio),
            nn.ReLU())
        self.conv_res = nn.Sequential(
            nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(4, 0), dilation=4, bias=False),
            nn.BatchNorm2d(64 * ratio),
            nn.ReLU(),
            nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
            nn.BatchNorm2d(64 * ratio),
            nn.ReLU(),)
        self.conv_final = nn.Sequential(
            nn.Conv2d(64 * ratio, 1, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0), dilation=1, bias=False))

        # bidirectional GRU
        self.hidden = hidden
        self.lstm = nn.GRU(64 * ratio * 6, self.hidden, num_layers=lstm_layers,
                           batch_first=True, bidirectional=True)
        self.final = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=(1, self.hidden * 2), stride=(1, 1), padding=(0, 0),
                      dilation=1, bias=True))

    def forward(self, fea, x):
        batch_size, _, _, _ = x.shape

        fea = self.fea_conv(fea)
        fea = fea.permute(0, 1, 3, 2).contiguous()
        fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
        fea = fea.view(batch_size, 128 * self.feature_num, -1, 1).contiguous()
        fea_first_final = self.fea_first_final(fea)
        #################################################
        out0 = fea_first_final.permute(0, 3, 2, 1)
        #################################################

        # bidirectional GRU
        fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
        fea = fea.permute(0, 2, 1).contiguous()
        fea, _ = self.fea_lstm(fea)
        fea = fea.view(batch_size, 1, -1, self.hidden_fea * 2)
        fea_lstm_final = self.fea_lstm_final(fea)
        fea_lstm_final = fea_lstm_final.permute(0, 3, 2, 1)
        #################################################
        out0 += fea_lstm_final
        #################################################

        out0_sigmoid = torch.sigmoid(out0)
        x = torch.cat([x, out0_sigmoid], dim=1)
        x = self.conv_first(x)
        x = self.conv_res(x)
        x_cnn = self.conv_final(x)
        #################################################
        out = x_cnn
        #################################################

        # bidirectional GRU
        x = x.view(batch_size, 256, -1, 6)
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(batch_size, x.size()[1], -1).contiguous()
        x, _ = self.lstm(x)
        x = x.view(batch_size, 1, -1, self.hidden * 2)
        x = self.final(x)
        x = x.permute(0, 3, 2, 1)
        #################################################
        out += x
        #################################################
        # res
        return out, out0


if __name__ == '__main__':
    model = SequenceModel(model_num=15, feature_dim=128, feature_num=16,
                          lstm_layers=2, hidden=128, drop_out=0.5,
                          Add_position=True)
    print(model)
avg_line_length: 47.720721 | max_line_length: 150 | alphanum_fraction: 0.473853

hexsha: f12e84e71dc2614e3a6f1d2f7d671fe27072ff71 | size: 474 | ext: py | lang: Python
max_stars:  py_framework/wsgi.py | zeroam/TIL @ 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null
max_issues: py_framework/wsgi.py | zeroam/TIL @ 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null
max_forks:  py_framework/wsgi.py | zeroam/TIL @ 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null
content:
from wsgiref.simple_server import make_server


def application(environ, start_response):
    response_body = [
        '{key}: {value}'.format(key=key, value=value)
        for key, value in sorted(environ.items())
    ]
    response_body = '\n'.join(response_body)

    status = '200 OK'
    response_headers = [
        ('Content-type', 'text/plain'),
    ]
    # WSGI requires start_response(status, headers) to be called before
    # the body is returned
    start_response(status, response_headers)

    return [response_body.encode('utf-8')]


server = make_server('localhost', 8000, app=application)
server.serve_forever()
avg_line_length: 26.333333 | max_line_length: 95 | alphanum_fraction: 0.670886

hexsha: 74be7f88c0d0fd7163d55362678d0731ca6c0782 | size: 1,066 | ext: py | lang: Python
max_stars:  ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 @ 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null
max_issues: ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 @ 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null
max_forks:  ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 @ 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null
content:
import pandas as pd
import xlrd
import xlsxwriter

from p01_kennenlernen import meinebibliothek

df = pd.read_excel("O:\___Python\personen.xlsx")  # import from Excel into Python; dates arrive as Timestamps
print(df)
print()

df1 = pd.to_datetime(df["Geburtsdatum"])  # convert Timestamp values to datetime
print(df1)
print()

alter = []
for geburtstag in df1:  # use the age calculation that was built earlier
    alter.append(meinebibliothek.alter(geburtstag))

durchschnittsalter = sum(alter) / len(alter)  # compute the average age
print("Durchschnittsalter ", durchschnittsalter)
print()

df["Alter"] = alter  # add the computed age to the table read from Excel
print(df)

writer = pd.ExcelWriter("O:\___Python\personen_bearbeitet.xlsx", engine="xlsxwriter")  # create an Excel writer using XlsxWriter
df.to_excel(writer, sheet_name='Sheet1')  # convert the dataframe to an XlsxWriter Excel object
writer.save()  # close the pandas Excel writer and export the Excel document
avg_line_length: 34.387097 | max_line_length: 135 | alphanum_fraction: 0.766417

hexsha: 24a6f16182dda0c0b9e74d9f007b1abeaf6547e8 | size: 1,272 | ext: py | lang: Python
max_stars:  Versuch2/task3.4.py | Tobias-Schoch/SSS @ f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | count: null
max_issues: Versuch2/task3.4.py | Tobias-Schoch/SSS @ f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | count: null
max_forks:  Versuch2/task3.4.py | Tobias-Schoch/SSS @ f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | count: 1 | 2022-01-06T12:47:53.000Z .. 2022-01-06T12:47:53.000Z
content:
import numpy as np
import cv2

# -------- Task 3.4 -------- #

# vector to store the boundary and width values
vec = np.zeros((5, 2))
crop = ["crop1", "crop2", "crop3", "crop4", "crop5"]

# read the image and convert it to grayscale
image = cv2.imread('data/korrigiertes_bild2.png', cv2.IMREAD_GRAYSCALE)

# boundary and width values for image 1
vec[0, 0] = 0
vec[0, 1] = 105
# boundary and width values for image 2
vec[1, 0] = 111
vec[1, 1] = 135
# boundary and width values for image 3
vec[2, 0] = 249
vec[2, 1] = 137
# boundary and width values for image 4
vec[3, 0] = 389
vec[3, 1] = 132
# boundary and width values for image 5
vec[4, 0] = 529
vec[4, 1] = 111

# process all 5 files
for z in range(1, 6):
    # vertical pixel y at which to start
    y = 0
    # horizontal pixel x at which to start
    x = int(vec[z - 1, 0])
    # how far down the crop should extend (height h)
    h = 480
    # how far across the crop should extend (width w)
    w = int(vec[z - 1, 1])
    # cut the image using the variables above
    crop[z - 1] = image[y:y + h, x:x + w]
    # show the cropped images
    cv2.imshow("Crop" + str(z), crop[z - 1])
    # export the cropped images
    cv2.imwrite("korrigiert" + str(z) + ".png", crop[z - 1])

cv2.waitKey(0)
avg_line_length: 25.959184 | max_line_length: 71 | alphanum_fraction: 0.63522

hexsha: 568673ef2cde487c729769189c6ebe595faadce9 | size: 2,170 | ext: py | lang: Python
max_stars:  kts/ui/leaderboard.py | konodyuk/kts @ 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | count: 18 | 2019-02-14T13:10:07.000Z .. 2021-11-26T07:10:13.000Z
max_issues: kts/ui/leaderboard.py | konodyuk/kts @ 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | count: 2 | 2019-02-17T14:06:42.000Z .. 2019-09-15T18:05:54.000Z
max_forks:  kts/ui/leaderboard.py | konodyuk/kts @ 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | count: 2 | 2019-09-15T13:12:42.000Z .. 2020-04-15T14:05:54.000Z
content:
import time

from kts.ui.components import HTMLRepr, Column, Field, Title, ThumbnailField, Raw
from kts.util.formatting import format_value


def format_experiment_date(date):
    delta = time.time() - date
    if delta < 60 * 60 * 24:
        return format_value(delta, time=True) + ' ago'
    else:
        return format_value(date, time=True)


class Leaderboard(HTMLRepr):
    """Needs refactoring, very sketchy"""

    def __init__(self, experiments):
        self.experiments = experiments
        self.col_widths = [1, 6, 5, 12, 6, 8, 8]
        self.col_names = ['#', 'id', 'score', 'model', '# features', "date", "took"]
        self.data = [
            (
                i,
                e.id,
                format_value(e.score),
                e.model_class,
                e.n_features,
                format_experiment_date(e.date),
                format_value(e.took, time=True)
            )
            for i, e in enumerate(experiments)
        ]

    def head_style(self, i):
        return dict(bg=False, accent=False, bold=False,
                    style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")

    def cell_style(self, i):
        return dict(bg=False, style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")

    def concat(self, row):
        return ' '.join(cell.html if not isinstance(cell, str) else cell for cell in row)

    @property
    def html(self):
        head_cells = [Field(self.col_names[0], **self.head_style(self.col_widths[0]))]
        for i in range(1, len(self.col_widths)):
            head_cells.append(Field(self.col_names[i], **self.head_style(self.col_widths[i])))
        rows = [[Field(self.data[i][j], **self.cell_style(self.col_widths[j]))
                 for j in range(len(self.data[0]))
                 ] for i in range(len(self.data))]
        rows = [Raw(e.html_collapsible(ThumbnailField(self.concat(rows[i]), css_id=-1, first=False), border=True))
                for i, e in enumerate(self.experiments)]
        res = Column([Title('leaderboard'),
                      Field(self.concat(head_cells), bg=False, bold=False,
                            style="padding-bottom: 0px; margin: 0px 2px 0px 2px;")] + rows)
        return res.html
avg_line_length: 38.070175 | max_line_length: 160 | alphanum_fraction: 0.595392

hexsha: 3b590c3afdc8778783a821b7e7abd8d729518eda | size: 6,099 | ext: py | lang: Python
max_stars:  old_combine_chrX.py | nikbaya/chrX @ 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | ["MIT"] | count: null
max_issues: old_combine_chrX.py | nikbaya/chrX @ 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | ["MIT"] | count: null
max_forks:  old_combine_chrX.py | nikbaya/chrX @ 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | ["MIT"] | count: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:26:20 2018

@author: nbaya
"""

import os
import glob
import re
import pandas as pd
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
import sys
import numpy as np

v3_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/imputed-v3-results/"

# Get saved phenotypes
malefiles = (list(map(os.path.basename, glob.glob(v3_path + "*.male*.gz"))))  # restrict to male files to prevent counting phenotype twice
find = re.compile(r"^(.*?)\..*")  # regex search term for grabbing all the text before the first period in a string
savedphenotypes = list(map(lambda filename: re.search(find, filename).group(1), malefiles))  # list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)

# Get all phenotypes
allphenotypes = pd.Series.tolist(pd.read_table(v3_path + "phenotypes.both_sexes.tsv").iloc[:]["phenotype"])  # list of all phenotypes (male & female)
allphenotypes = pd.DataFrame({'phenotype': allphenotypes})
allphenotypes.to_csv(v3_path + "allphenotypeslist.tsv", sep="\t")

# TEMPORARY -------------------------------------------------------------------
#savedFiles = (list(map(os.path.basename, glob.glob(chrX_path + "*.gz"))))  # restrict to male files to prevent counting phenotype twice
#find = re.compile(r"^(.*?)\..*")  # regex search term for grabbing all the text before the first period in a string
#newphenotypes = list(map(lambda filename: re.search(find, filename).group(1), savedFiles))  # list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#
#nextphenotypes = list(set(savedphenotypes).difference(set(newphenotypes)))
#
#len(nextphenotypes)
# -----------------------------------------------------------------------------

n_cores = multiprocessing.cpu_count()


# old method of extracting chrX
def prev_chrX_from_saved_phenotypes(ph):
    tb_male = pd.read_csv((v3_path + ph + ".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t')  # read files
    tb_female = pd.read_csv((v3_path + ph + ".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
    chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for males
    chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for females
    chrX = pd.merge(chrX_male, chrX_female, on='variant', suffixes=("_male", "_female"))
    chrX.to_csv(chrX_path + ph + ".chrX.tsv.gz", sep='\t', compression='gzip')


#Parallel(n_jobs=n_cores, verbose=50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in savedphenotypes)

# TEMPORARY -------------------------------------------------------------------
#Parallel(n_jobs=n_cores, verbose=50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in nextphenotypes)
# -----------------------------------------------------------------------------

#def chrX_from_new_phenotypes(ph):
#
##    call(["gsutil", "cp", "gs://ukbb-gwas-imputed-v3-results/export1/" + ph + ".**male*",
##          "~/Documents/lab/ukbb-sexdiff/chrX/"])
#
#    call('gsutil ls gs://ukbb-gwas-imputed-v3-results/export1/' + ph + '.**male*', shell=True)
##    "~/Documents/lab/ukbb-sexdiff/chrX/',)
##    call(["paste", "<(cat", ph, ".imputed_v3.results.female.tsv.gz", "|", "zcat",
##          "|", "cut -f 1,2,3,5,6,8)", "<(cat", ph, ".imputed_v3.results.male.tsv.gz",
##          "|", "zcat", "|", "cut", "-f", "1,2,3,5,6,8)", "|", "awk", "\'", "NR==1{",
##          "print", "\"variant\",\"n_female\",\"n_male\",\"frq_female\",\"frq_male\",\"beta_female\",\"se_female\",\"p_female\",\"beta_male\",\"se_male\",\"p_male\"",
##          "}NR>1", "&&", "$1==$7{", "maff=$3/(2*$2);", "mafm=$9/(2*$8);",
##          "if(maff > .05 && maff<.95 && mafm > .05 && mafm < .95){",
##          "print $1,$2,$8,maff,mafm,$4,$5,$6,$10,$11,$12} }\' | gzip >", ph, ".sexdiff.gz]"])
#
#testph = ['46', '47']
#
#for ph in testph:
#    chrX_from_new_phenotypes(ph)

#for ph in set(allphenotypes).difference(set(savedphenotypes)):  # for all phenotypes not saved
# -----------------------------------------------------------------------------

chrX_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/chrX/data/"

ph = "1757"

# Males
tb_male = pd.read_csv((v3_path + ph + ".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t')  # read files
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for males
chrX_male = chrX_male.reset_index()  # necessary for upcoming concat between chrX_male and a3
a1 = np.asarray(chrX_male.iloc[:, 0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2), 4)))
chrX_male2 = pd.concat([a3[[0, 1, 3, 2]], chrX_male], axis=1).drop(['index', 'tstat', 'AC', 'ytx'], axis=1)
# rename returns a new frame; assign it back so the renamed columns are written
chrX_male2 = chrX_male2.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
                                                   "variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
                                                   "se": "SE", "pval": "P_VAL"})
chrX_male2.to_csv(chrX_path + ph + ".chrX.male.tsv.gz", sep='\t', compression='gzip')

# Females
tb_female = pd.read_csv((v3_path + ph + ".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')  # read files
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for females
chrX_female = chrX_female.reset_index()  # necessary for upcoming concat between chrX_female and a3
a1 = np.asarray(chrX_female.iloc[:, 0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2), 4)))
chrX_female2 = pd.concat([a3[[0, 1, 3, 2]], chrX_female], axis=1).drop(['index', 'tstat', 'AC', 'ytx'], axis=1)
# rename returns a new frame; assign it back so the renamed columns are written
chrX_female2 = chrX_female2.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
                                                       "variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
                                                       "se": "SE", "pval": "P_VAL"})
chrX_female2.to_csv(chrX_path + ph + ".chrX.female.tsv.gz", sep='\t', compression='gzip')
avg_line_length: 42.950704 | max_line_length: 178 | alphanum_fraction: 0.61174

hexsha: 7954a7bbe8ccac9a9d76513832ed91b4c1c715ad | size: 3,075 | ext: py | lang: Python
max_stars:  tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
max_issues: tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
max_forks:  tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null
content:
import onegov.core
import onegov.org
from tests.shared import utils


def test_view_permissions():
    utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp)


def test_notfound(client):
    notfound_page = client.get('/foobar', expect_errors=True)
    assert "Seite nicht gefunden" in notfound_page
    assert notfound_page.status_code == 404


def test_links(client):
    root_url = client.get('/').pyquery('.side-navigation a').attr('href')

    client.login_admin()
    root_page = client.get(root_url)

    new_link = root_page.click("Verknüpfung")
    assert "Neue Verknüpfung" in new_link

    new_link.form['title'] = 'Google'
    new_link.form['url'] = 'https://www.google.ch'
    link = new_link.form.submit().follow()

    assert "Sie wurden nicht automatisch weitergeleitet" in link
    assert 'https://www.google.ch' in link

    client.get('/auth/logout')

    root_page = client.get(root_url)
    assert "Google" in root_page
    google = root_page.click("Google", index=0)

    assert google.status_code == 302
    assert google.location == 'https://www.google.ch'


def test_clipboard(client):
    client.login_admin()

    page = client.get('/topics/organisation')
    assert 'paste-link' not in page

    page = page.click(
        'Kopieren',
        extra_environ={'HTTP_REFERER': page.request.url}
    ).follow()

    assert 'paste-link' in page

    page = page.click('Einf').form.submit().follow()
    assert '/organisation/organisation' in page.request.url


def test_clipboard_separation(client):
    client.login_admin()

    page = client.get('/topics/organisation')
    page = page.click('Kopieren')

    assert 'paste-link' in client.get('/topics/organisation')

    # new client (browser) -> new clipboard
    client = client.spawn()
    client.login_admin()

    assert 'paste-link' not in client.get('/topics/organisation')


def test_global_tools(client):
    links = client.get('/').pyquery('.globals a')
    assert links == []

    client.login_admin()
    links = client.get('/').pyquery('.globals a')
    assert links != []


def test_top_navigation(client):
    links = client.get('/').pyquery('.side-navigation a span')
    assert links.text() == 'Organisation Themen Kontakt Aktuelles'


def test_announcement(client):
    client.login_admin()
    color = '#006fbb'
    bg_color = '#008263'
    text = 'This is an announcement which appears on top of the page'
    settings = client.get('/header-settings')

    # test the defaults before setting the colors
    assert settings.form['left_header_announcement_bg_color'].value == (
        '#FBBC05'
    )
    assert settings.form['left_header_announcement_font_color'].value == (
        '#000000'
    )

    settings.form['left_header_announcement'] = text
    settings.form['left_header_announcement_bg_color'] = bg_color
    settings.form['left_header_announcement_font_color'] = color
    page = settings.form.submit().follow()

    assert text in page
    assert (
        f'<div id="announcement" style="color: {color}; '
        f'background-color: {bg_color};">'
    ) in page
avg_line_length: 27.212389 | max_line_length: 74 | alphanum_fraction: 0.67935

hexsha: 5c05545589141fb82f106641f89a15eb131c03e9 | size: 301 | ext: py | lang: Python
max_stars:  python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup @ 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | count: 16 | 2018-11-26T08:39:42.000Z .. 2019-05-08T10:09:52.000Z
max_issues: python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup @ 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | count: 8 | 2020-05-04T06:29:26.000Z .. 2022-02-12T05:33:16.000Z
max_forks:  python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup @ 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | count: 5 | 2020-02-11T16:02:21.000Z .. 2021-02-05T07:48:30.000Z
content:
import numpy as np
import cv2
img = cv2.imread('city2.jpg',0)
rows,cols = img.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),90,1)
dst = cv2.warpAffine(img,M,(cols,rows))
cv2.imshow('image cv2',dst)
cv2.waitKey(0)
# to save the image
# cv2.imwrite('image1.png',img)
cv2.destroyAllWindows()
avg_line_length: 15.05 | max_line_length: 49 | alphanum_fraction: 0.704319

hexsha: 6908c59f82b4dce18b0359af8fb11f6688af03cf | size: 3,200 | ext: py | lang: Python
max_stars:  test/test_npu/test_network_ops/test_sin.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-12-02T03:07:35.000Z .. 2021-12-02T03:07:35.000Z
max_issues: test/test_npu/test_network_ops/test_sin.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-11-12T07:23:03.000Z .. 2021-11-12T08:28:13.000Z
max_forks:  test/test_npu/test_network_ops/test_sin.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: null
content:
# Copyright (c) 2020, Huawei Technologies. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor


class TestSin(TestCase):
    def cpu_op_exec(self, input1):
        output = torch.sin(input1)
        output = output.numpy()
        return output

    def npu_op_exec(self, input1):
        output = torch.sin(input1)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def npu_op_exec_out(self, input1, input2):
        torch.sin(input1, out=input2)
        output = input2.to("cpu")
        output = output.numpy()
        return output

    def test_sin_common_shape_format(self, device):
        shape_format = [
            [[np.float32, 0, (5, 3)]],
        ]
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -10, 10)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output = self.npu_op_exec(npu_input1)
            self.assertRtolEqual(cpu_output, npu_output)

    def test_sin_out_common_shape_format(self, device):
        shape_format = [
            [[np.float16, -1, (4, 3, 128, 128)], [np.float16, -1, (4, 3, 128, 128)]],
            [[np.float16, 0, (4, 3, 128, 128)], [np.float16, 0, (10, 3, 64, 128)]],
            [[np.float16, 0, (4, 3, 128, 128)], [np.float16, 0, (2, 3, 256, 128)]],
            [[np.float32, 0, (4, 3, 128, 128)], [np.float32, 0, (4, 3, 128, 128)]],
            [[np.float32, 0, (4, 3, 128, 128)], [np.float32, 0, (8, 3, 64, 128)]],
            [[np.float32, -1, (4, 3, 128, 128)], [np.float32, -1, (4, 3, 256, 64)]],
        ]
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -10, 10)
            cpu_input2, npu_input2 = create_common_tensor(item[0], -10, 10)
            cpu_input3, npu_input3 = create_common_tensor(item[1], -10, 10)
            if cpu_input1.dtype == torch.float16:
                cpu_input1 = cpu_input1.to(torch.float32)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output_out1 = self.npu_op_exec_out(npu_input1, npu_input2)
            npu_output_out2 = self.npu_op_exec_out(npu_input1, npu_input3)
            cpu_output = cpu_output.astype(npu_output_out1.dtype)
            self.assertRtolEqual(cpu_output, npu_output_out1)
            self.assertRtolEqual(cpu_output, npu_output_out2)


instantiate_device_type_tests(TestSin, globals(), except_for='cpu')
if __name__ == "__main__":
    run_tests()
avg_line_length: 42.105263 | max_line_length: 89 | alphanum_fraction: 0.637813

hexsha: 2456670193398aaa3aace6b23615d552f25b4839 | size: 5,825 | ext: py | lang: Python
max_stars:  test/test_npu/test_network_ops/test_neg.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-12-02T03:07:35.000Z .. 2021-12-02T03:07:35.000Z
max_issues: test/test_npu/test_network_ops/test_neg.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-11-12T07:23:03.000Z .. 2021-11-12T08:28:13.000Z
max_forks:  test/test_npu/test_network_ops/test_neg.py | Ascend/pytorch @ 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: null
content:
# Copyright (c) 2020, Huawei Technologies. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor


class TestNeg(TestCase):
    def cpu_op_exec(self, input1):
        output = torch.neg(input1)
        output = output.numpy()
        return output

    def npu_op_exec(self, input1):
        output = torch.neg(input1)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def npu_op_exec_out(self, input1, input2):
        torch.neg(input1, out=input2)
        output = input2.to("cpu")
        output = output.numpy()
        return output

    def cpu_inp_op_exec(self, input1):
        torch.neg_(input1)
        output = input1.numpy()
        return output

    def npu_inp_op_exec(self, input1):
        torch.neg_(input1)
        output = input1.to("cpu")
        output = output.numpy()
        return output

    def neg_result(self, shape_format):
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
            if cpu_input1.dtype == torch.float16:
                cpu_input1 = cpu_input1.to(torch.float32)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output = self.npu_op_exec(npu_input1)
            cpu_output = cpu_output.astype(npu_output.dtype)
            self.assertRtolEqual(cpu_output, npu_output)

            cpu_input_inp, npu_input_inp = create_common_tensor(item[0], -100, 100)
            if cpu_input_inp.dtype == torch.float16:
                cpu_input_inp = cpu_input_inp.to(torch.float32)
            cpu_output_inp = self.cpu_inp_op_exec(cpu_input_inp)
            npu_output_inp = self.npu_inp_op_exec(npu_input_inp)
            cpu_output_inp = cpu_output_inp.astype(npu_output_inp.dtype)
            self.assertRtolEqual(cpu_output_inp, npu_output_inp)

    def neg_out_result(self, shape_format):
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
            cpu_input2, npu_input2 = create_common_tensor(item[0], -100, 100)
            cpu_input3, npu_input3 = create_common_tensor(item[1], -100, 100)
            if cpu_input1.dtype == torch.float16:
                cpu_input1 = cpu_input1.to(torch.float32)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output_out1 = self.npu_op_exec_out(npu_input1, npu_input2)
            npu_output_out2 = self.npu_op_exec_out(npu_input1, npu_input3)
            cpu_output = cpu_output.astype(npu_output_out1.dtype)
            self.assertRtolEqual(cpu_output, npu_output_out1)
            self.assertRtolEqual(cpu_output, npu_output_out2)

    def test_neg_out_result(self, device):
        shape_format = [
            [[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [256, 116, 1, 1]]],
            [[np.float16, 0, [128, 58, 28, 28]], [np.float16, 0, [58, 58, 1, 1]]],
            [[np.float16, 0, [128, 3, 224, 224]], [np.float16, 0, [3, 3, 3, 3]]],
            [[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [116, 116, 1, 1]]],
            [[np.float32, 0, [256, 128, 7, 7]], [np.float32, 0, [128, 128, 3, 3]]],
            [[np.float32, 0, [256, 3, 224, 224]], [np.float32, 0, [3, 3, 7, 7]]],
            [[np.float32, 0, [2, 3, 3, 3]], [np.float32, 0, [3, 1, 3, 3]]],
            [[np.float32, 0, [128, 232, 7, 7]], [np.float32, 0, [232, 232, 1, 1]]],
        ]
        self.neg_out_result(shape_format)

    def test_neg_shape_format_fp16_1d(self, device):
        format_list = [0, 3]
        shape_format = [[[np.float16, i, [96]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_1d(self, device):
        format_list = [0, 3]
        shape_format = [[[np.float32, i, [96]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_2d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [448, 1]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_2d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [448, 1]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_3d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [64, 24, 38]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_3d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [64, 24, 38]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_4d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [32, 3, 3, 3]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_4d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [32, 3, 3, 3]]] for i in format_list]
        self.neg_result(shape_format)


instantiate_device_type_tests(TestNeg, globals(), except_for="cpu")
if __name__ == "__main__":
    run_tests()
avg_line_length: 41.607143 | max_line_length: 85 | alphanum_fraction: 0.635365

hexsha: 3004c3f59cd9236cf1638000226994613db59de2 | size: 733 | ext: py | lang: Python
max_stars:  src/kinect/Kinect.py | florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | count: 7 | 2015-11-27T09:53:32.000Z .. 2021-01-13T17:35:54.000Z
max_issues: src/kinect/Kinect.py | florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | count: null
max_forks:  src/kinect/Kinect.py | florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | count: null
content:
import time
import numpy as np
from freenect import sync_get_depth as get_depth, sync_get_video as get_video


class Kinect(object):
    """Offers access to rgb and depth from the real Kinect"""

    def __init__(self):
        pass

    def get_frame(self, record=False):
        # Get a fresh frame
        (depth, _) = get_depth(format=4)
        (rgb, _) = get_video()
        if record:
            self.snapshot(rgb, depth)
        return (rgb, depth)

    def snapshot(self, rgb, depth):
        filename = "frames/frame-%d" % int(time.time() * 1000)
        filename_rgb = filename + "-rgb"
        filename_depth = filename + "-depth"
        np.save(filename_rgb, rgb)
        np.save(filename_depth, depth)
avg_line_length: 31.869565 | max_line_length: 78 | alphanum_fraction: 0.604366

hexsha: 063b14d87276bf24509b35e8dd89ae6d528821b9 | size: 518 | ext: py | lang: Python
max_stars:  Online-Judges/DimikOJ/Python/64-caesar-cipher-2.py | shihab4t/Competitive-Programming @ e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | ["Unlicense"] | count: 3 | 2021-06-15T01:19:23.000Z .. 2022-03-16T18:23:53.000Z
max_issues: Online-Judges/DimikOJ/Python/64-caesar-cipher-2.py | shihab4t/Competitive-Programming @ e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | ["Unlicense"] | count: null
max_forks:  Online-Judges/DimikOJ/Python/64-caesar-cipher-2.py | shihab4t/Competitive-Programming @ e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | ["Unlicense"] | count: null
content:
def decrypted_data(text, key):
    # build a doubled lowercase alphabet so index lookups can wrap around
    char_lower = ""
    for i in range(97, 123):
        char_lower += chr(i)
    char_lower *= 2
    char_upper = char_lower.upper()
    decrypted = ""
    for i in text:
        if i in char_lower:
            decrypted += char_lower[char_lower.index(i, 26) - key]
        elif i in char_upper:
            decrypted += char_upper[char_upper.index(i, 26) - key]
        else:
            decrypted += i
    return decrypted


text = input()
key = int(input())
print(decrypted_data(text, key))
avg_line_length: 24.666667 | max_line_length: 65 | alphanum_fraction: 0.57722

hexsha: 064427ba3481c1d9ed4c628c04dbaf55a12eda29 | size: 365 | ext: py | lang: Python
max_stars:  202-happy-number/202-happy-number.py | hyeseonko/LeetCode @ 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: 2 | 2021-12-05T14:29:06.000Z .. 2022-01-01T05:46:13.000Z
max_issues: 202-happy-number/202-happy-number.py | hyeseonko/LeetCode @ 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: null
max_forks:  202-happy-number/202-happy-number.py | hyeseonko/LeetCode @ 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: null
content:
class Solution:
def isHappy(self, n: int) -> bool:
pool = set()
pool.add(n)
result=n
while(result>1):
strn = str(result)
result = 0
for c in strn:
result+=int(c)*int(c)
if result in pool:
return False
pool.add(result)
return True
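        # e.g. n = 19: 19 -> 82 -> 68 -> 100 -> 1, so 19 is happy (True);
        # a repeated intermediate sum (a cycle caught via `pool`) returns False.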
| 26.071429 | 38 | 0.441096 |
0675b9a64430a3b476aa0125ccfd22711ba0b255
| 6,356 |
py
|
Python
|
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# zdfneo.py - called by __init__.py/ZDF_get_content
#
# These functions evaluate the ZDF Neo pages
#
Neo_Base = 'https://www.neo-magazin-royale.de'
PREFIX = '/video/ardmediathek2016/zdfneo'
####################################################################################################
@route(PREFIX + '/neo_content')
def neo_content(path, ID, offset=0):
Log('neo_content')
	# JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/?start=%s&count=8'	# also includes editorial posts
	# JUMPPATH: start=0 -> page 1, start=8 -> page 2
JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/themen/134270/thema-ganze-folge.html?start=%s&count=8'
title_main = 'NEO MAGAZIN ROYALE'
	if offset == 0:	# discard the first path (from ZDF_get_content); JUMPPATH contains full episodes
path = JUMPPATH % str(0)
page = HTTP.Request(path).content
	pagination = blockextract('class="pagination', page)	# "pagination active" = current page
page_cnt = len(pagination)
	last_page = stringextract('count=8">', '</a>', pagination[-1])	# last page
act_page = stringextract('pagination active">', 'a>', page)
act_page = stringextract('count=8">', '<', act_page)
if offset == 0:
act_page = '1'
cnt_per_page = 8
oc = ObjectContainer(title2='Seite ' + act_page, view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
content = blockextract('class="modules', page)
	if len(oc) == 0:
		msg_notfound = title_main + ': Auswertung fehlgeschlagen'	# evaluation failed
		title = msg_notfound.decode(encoding="utf-8", errors="ignore")
name = "ZDF Mediathek"
summary = 'zurück zur ' + name.decode(encoding="utf-8", errors="ignore")
oc.add(DirectoryObject(key=Callback(Main_ZDF, name=name), title=title,
summary=summary, tagline='TV', thumb=R(ICON_MAIN_ZDF)))
return oc
for rec in content:
url = Neo_Base + stringextract('href="', '"', rec)
		img = stringextract('sophoraimage="', '"', rec)	# ZDF path
if img == '':
			img = Neo_Base + stringextract('src="', '"', rec)	# NEO path without base
			img = img.decode(encoding="utf-8", errors="ignore")	# umlauts in the path (hurensöhne_mannheims)
img_alt = 'Bild: ' + stringextract('alt="', '"', rec)
img_alt = unescape_neo(img_alt)
img_alt = img_alt.decode(encoding="utf-8", errors="ignore")
title = stringextract('name">', '</h3', rec)
if title == '':
title = stringextract('content="', '"', rec)
dataplayer = stringextract('data-player="', '"', rec)
sid = stringextract('data-sophoraid="', '"', rec)
datetime = ''
if 'datetime=""' in rec:
datetime = stringextract('datetime="">', '</time>', rec)# datetime="">07.09.2016</time>
else:
datetime = stringextract('datetime="', '</time>', rec) # ="2017-05-18 18:10">18.05.2017</time>
datetime = datetime[11:] # 1. Datum abschneiden
datetime = datetime.replace('">', ', ')
Log('neuer Satz:')
Log(url);Log(img);Log(title);Log(dataplayer);Log(sid);Log(datetime);
title = title.decode(encoding="utf-8", errors="ignore")
oc.add(DirectoryObject(key=Callback(GetNeoVideoSources, url=url, sid=sid, title=title, summary=datetime,
tagline=img_alt, thumb=img), title=title, summary=datetime, tagline=img_alt, thumb=img))
	# check for more pages
Log('offset: ' + str(offset));Log(act_page); Log(last_page)
if int(act_page) < int(last_page):
offset = int(offset) + 8
JUMPPATH = JUMPPATH % offset
Log(JUMPPATH);
oc.add(DirectoryObject(key=Callback(neo_content, path=JUMPPATH, ID=ID, offset=offset),
title=title_main, thumb=R(ICON_MEHR), summary=''))
return oc
#-------------------------
@route(PREFIX + '/GetNeoVideoSources')
# loading chain similar to ZDF (get_formitaeten), but identical only for videodat_url
def GetNeoVideoSources(url, sid, title, summary, tagline, thumb):
Log('GetNeoVideoSources url: ' + url)
oc = ObjectContainer(title2='Videoformate', view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
	formitaeten = get_formitaeten(sid=sid, ID='NEO')	# determine video URLs
	if formitaeten == '':	# re-check that videos exist
msg = 'Videoquellen zur Zeit nicht erreichbar' + ' Seite:\r' + url
return ObjectContainer(header='Error', message=msg)
only_list = ["h264_aac_ts_http_m3u8_http"]
oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
thumb=thumb, only_list=only_list)
title_oc='weitere Video-Formate'
if Prefs['pref_use_downloads']:
title=title + ' und Download'
	# oc = Parseplaylist(oc, videoURL, thumb)	# not needed here - ZDF already provides 3 resolution ranges
oc.add(DirectoryObject(key=Callback(NEOotherSources, title=title, tagline=tagline, thumb=thumb, sid=sid),
title=title_oc, summary='', thumb=R(ICON_MEHR)))
return oc
#-------------------------
@route(PREFIX + '/NEOotherSources')
def NEOotherSources(title, tagline, thumb, sid):
Log('NEOotherSources')
	title_org = title	# backup for the text file accompanying the video
	summary_org = tagline	# swap summary with tagline (summary takes priority during playback)
oc = ObjectContainer(title2='Videoformate', view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
	formitaeten = get_formitaeten(sid=sid, ID='NEO')	# determine video URLs
	if formitaeten == '':	# re-check that videos exist
		msg = 'Video leider nicht mehr vorhanden'
		return ObjectContainer(header='Error', message=msg)
	only_list = ["h264_aac_mp4_http_na_na", "vp8_vorbis_webm_http_na_na"]
oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
thumb=thumb, only_list=only_list)
	# high=0: first video is so far the highest quality: [progressive] veryhigh
oc = test_downloads(oc,download_list,title_org,summary_org,tagline,thumb,high=0) # Downloadbutton(s)
return oc
####################################################################################################
# HTML entities in neo; for the characters see http://aurelio.net/bin/python/fix-htmldoc-utf8.py
# HTMLParser() fails here
def unescape_neo(line):
line_ret = (line.replace("ö", "ö").replace("ä", "Ä").replace("ü", "ü")
.replace("Ã\x96", "Ö").replace("Ã\x84", "Ä").replace("Ã\x9c", "Ü")
.replace("Ã\x9f", "ß"))
return line_ret
| 43.834483 | 112 | 0.660321 |
23300efdd697b2575e312f7edd92461f467cdc9c
| 161 |
py
|
Python
|
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.gis.forms.fields import CoordinatesField
from onegov.gis.forms.widgets import CoordinatesWidget
__all__ = ['CoordinatesField', 'CoordinatesWidget']
| 32.2 | 54 | 0.832298 |
88ec6c26cd7a2f727e00f467fdd178e22cb46386
| 810 |
py
|
Python
|
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import the SQLite driver:
import sqlite3
# Connect to the SQLite database.
# The database file is hello.db.
# If the file does not exist, it is created automatically in the current directory:
conn = sqlite3.connect('hello.db')
# Create a cursor:
cursor = conn.cursor()
cursor.execute('drop table if exists user')
# Execute a SQL statement to create the user table:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# Execute further SQL statements, inserting records:
cursor.execute('insert into user (id, name) values (\'1\', \'Michael\')')
cursor.execute('insert into user (id, name) values (\'2\', \'Jackson\')')
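# Parameter binding avoids the manual quote-escaping above (a sketch):
#   cursor.execute('insert into user (id, name) values (?, ?)', ('3', 'Rose'))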
# rowcount gives the number of rows inserted by the last statement:
print(cursor.rowcount)
# Query:
print(cursor.execute('select * from user').fetchall())
print(cursor.execute('select * from user').fetchmany(size=1))
print(cursor.execute('select * from user').fetchone())
# Close the cursor:
cursor.close()
# Commit the transaction:
conn.commit()
# Close the connection:
conn.close()
| 26.129032 | 82 | 0.707407 |
001659507468ba211846a086bb3af6d259d15e23
| 409 |
py
|
Python
|
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Solution:
def halvesAreAlike(self, s: str) -> bool:
vowel = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}
first = s[:int(len(s)/2)]
second = s[int(len(s)/2):]
        firstsum = sum(1 for ch in first if ch in vowel)
        secondsum = sum(1 for ch in second if ch in vowel)
        return firstsum == secondsum
| 37.181818 | 66 | 0.484108 |
ae5e90efc8111cc99c18543d5afea38d02da46b8
| 125 |
py
|
Python
|
crawlerhttp/http_header_generator.py
|
mcmin001/NBCrawler
|
ec6a348e32889fb9252651b203a725a39b6836ec
|
[
"Apache-2.0"
] | null | null | null |
crawlerhttp/http_header_generator.py
|
mcmin001/NBCrawler
|
ec6a348e32889fb9252651b203a725a39b6836ec
|
[
"Apache-2.0"
] | null | null | null |
crawlerhttp/http_header_generator.py
|
mcmin001/NBCrawler
|
ec6a348e32889fb9252651b203a725a39b6836ec
|
[
"Apache-2.0"
] | null | null | null |
def get_http_header(user_agent):
    # dictionary data type: dict
headers = {
'user-agent': user_agent
}
return headers
| 17.857143 | 32 | 0.616 |
889eb3167b3872b5371c8a539d4347c8d68744c1
| 760 |
py
|
Python
|
SBTK_League_Helper/src/tools/exceptions.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/src/tools/exceptions.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/src/tools/exceptions.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
# This needs to be sorted out in a smarter way
class InitializationError(Exception):
def __init__(self, SomeClass, description):
self.value = SomeClass
self.description = description.format(SomeClass.__name__)
def __str__(self):
return self.description
class ReservedValueError(Exception):
def __init__(self, expected, received, description):
        self.value = received
self.expected = expected
self.description = description.format(expected, received)
def __str__(self):
return self.description
class ApplicationError(Exception):
pass
class NonFatalError(ApplicationError):
pass
class FatalError(Exception):
pass
class UserError(NonFatalError):
pass
| 24.516129 | 65 | 0.696053 |
ee476c7b28e95c420c92669fa0909df9dee5dae3
| 576 |
py
|
Python
|
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
from random import choice  # assumed import; `choice` is used below but never defined
kraftausdruecke = [
"Mist",
"Verdammt",
"Mannmannmann",
"Herrgottnochmal",
"Echt jetzt",
"Zum Teufel"
]
berufe = [
"Baggerführer",
"Velokurier",
"Tierärztin",
"Verkehrspolizist",
"Schreinerin",
"Apotheker",
"Komponist",
"Physikerin",
"Buchhändlerin"
]
a = choice(kraftausdruecke)
# pick random element in list
# find out its index
# pop it from the list, so it can’t be picked again
b = berufe.pop(berufe.index(choice(berufe)))
c = choice(berufe)
print(a, "Erwin" + ",", "ich bin", b, "und nicht", c + "!")
| 20.571429 | 59 | 0.604167 |
e109e7b0486674fec7a7133e0f5ef96b64e2f7e2
| 9,962 |
py
|
Python
|
wz/ui/choice_grid.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/choice_grid.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/choice_grid.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ui/choice_grid.py
Last updated: 2021-05-04
Manage the grid for the pupil-subject-choice editor.
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
### Display texts
_PUPIL = "Schüler"
_GROUPS = "Gruppen"
## Measurements are in mm ##
_SEP_SIZE = 1
_HEIGHT_LINE = 6
_WIDTH_TOGGLE = 8
COLUMNS = (35, 15, 15, _SEP_SIZE) # + ...
ROWS = (
#title
12,
# info rows
_HEIGHT_LINE, _HEIGHT_LINE, _HEIGHT_LINE, _HEIGHT_LINE,
_HEIGHT_LINE, _HEIGHT_LINE,
# header (tags)
_HEIGHT_LINE, _SEP_SIZE
) # + _HEIGHT_LINE * n
# Content of marked toggle-cells
MARK = 'X'
#####################################################
from qtpy.QtWidgets import QApplication
from qtpy.QtGui import QColor, QBrush
from qtpy.QtCore import Qt
from ui.gridbase import GridBase
class ToggleGrid(GridBase):
"""A grid of toggle-cells with column and row headers (potentially
multi-row or multi-column respectively).
Clicking on a cell will toggle its value. SHIFT-clicking marks a cell
as the starting point of a rectangle. A further SHIFT-click marks
the end-point of the rectangle and toggles all cells within the
rectangle. The marking is removed.
The mark can also be removed by clicking elsewhere (without SHIFT).
"""
def __init__(self, gview, info, pupil_data, subjects):
"""<gview> is the "View" on which this "Scene" is to be presented.
<info>: general information, [[key, value], ... ]
<pupil_data>: A list of pupil lines, only valid sids are included:
[[pid, name, groups, {sid: val, ... }], ... ]
val: true if marked
<subjects>: The list of subjects, possibly containing spacers:
[[sid, name], ... , null-value, [sid, name], ... ]
"""
# Set up grid: get number of rows and columns
row_pids = len(ROWS)
_ROWS = ROWS + (_HEIGHT_LINE,) * len(pupil_data)
col_sids = len(COLUMNS)
_COLS = list(COLUMNS)
for s in subjects:
_COLS.append(_WIDTH_TOGGLE if s else _SEP_SIZE)
super().__init__(gview, _ROWS, _COLS)
self.styles()
# Horizontal separator (after headers)
self.basic_tile(row_pids - 1, 0, tag = None, text = None,
style = 'padding', cspan = len(_COLS))
# Vertical separator (before subjects)
col = col_sids
self.basic_tile(1, col_sids - 1, tag = None, text = None,
style = 'padding', rspan = len(_ROWS) - 1)
### Title area
self.basic_tile(0, 0, tag = None, text = "Fächer(ab)wahl",
style = 'title', cspan = 2)
self.basic_tile(0, 4, tag = None,
text = ADMIN.school_data['SCHOOL_NAME'],
style = 'titleR', cspan = 10)
### General Info
line = 1
for key, value in info:
self.basic_tile(line, 0, tag = None, text = key,
style = 'info')
# Non-editable
self.basic_tile(line, 1, tag = None, text = value,
style = 'info', cspan = 2)
line += 1
### Subject headers
line = 7
rspan = line - 1
self.basic_tile(line, 0, tag = None, text = _PUPIL,
style = 'small', cspan = 2)
self.basic_tile(line, 2, tag = None, text = _GROUPS,
style = 'small')
col = col_sids
self.sids = []
for sid_name in subjects:
if sid_name:
sid, name = sid_name
self.sids.append(sid)
self.basic_tile(line, col, tag = None, text = sid,
style = 'small')
self.basic_tile(1, col, tag = None, text = name,
style = 'v', rspan = rspan)
else:
# vertical spacer
self.basic_tile(1, col, tag = None, text = None,
style = 'padding', rspan = len(_ROWS) - 1)
col += 1
### Pupil lines
row = row_pids
# The array (list of lists) <self.toggles> is a simple matrix
# of the toggle-tiles, omitting the skipped columns.
self.toggles = []
self.pids = []
self.value0 = set() # Set of initially marked cells (x, y)
y = 0
for pid, pname, groups, choices in pupil_data:
self.basic_tile(row, 0, tag = None, text = pname,
style = 'name', cspan = 2)
self.basic_tile(row, 2, tag = None, text = groups,
style = 'small')
col = col_sids
x = 0
_toggles = []
for sid_name in subjects:
if sid_name:
try:
marked = choices[sid_name[0]]
except KeyError:
# Invalid key: not editable
tag = None
style = 'padding'
val = None
else:
tag = (x, y)
style = 'toggle'
if marked:
self.value0.add(tag)
val = MARK
else:
val = ''
tile = self.basic_tile(row, col, tag = tag,
text = val, style = style)
_toggles.append(tile)
x += 1
col += 1
self.pids.append(pid)
self.toggles.append(_toggles)
y += 1
row += 1
# Need a highlighted/selected QBrush for a toggle-cell
self.mark_brush = QBrush(QColor('#80FF7200'))
self.no_mark = self.style('toggle').bgColour or QBrush(Qt.NoBrush)
# Collect changed cell tags for signalling "table changed".
self._changes = set()
self.toggle_start = None
#
def styles(self):
"""Set up the styles used in the table view.
"""
self.new_style('base', font = ADMIN.school_data['FONT'], size = 11)
self.new_style('name', base = 'base', align = 'l')
self.new_style('title', font = ADMIN.school_data['FONT'], size = 12,
align = 'l', border = 0, highlight = 'b')
self.new_style('info', base = 'base', border = 0, align = 'l')
self.new_style('underline', base = 'base', border = 2)
self.new_style('titleR', base = 'title', align = 'r')
self.new_style('small', base = 'base', size = 10)
self.new_style('v', base = 'small', align = 'b')
self.new_style('toggle', base = 'base', highlight = ':002562',
mark = 'E00000')
# self.new_style('no-toggle', bg = '666666')
self.new_style('padding', bg = '666666')
#
def tile_left_clicked(self, tile):
if isinstance(tile.tag, tuple):
# toggle-tile
kbdmods = QApplication.keyboardModifiers()
if kbdmods & Qt.ShiftModifier:
if self.toggle_start:
# toggle range
c0, r0 = self.toggle_start.tag
c1, r1 = tile.tag
r_range = range(r0, r1 + 1) if r1 >= r0 \
else range(r1, r0 + 1)
c_range = range(c0, c1 + 1) if c1 >= c0 \
else range(c1, c0 + 1)
for r in r_range:
for c in c_range:
self.toggle(self.toggles[r][c])
else:
self.toggle_start = tile
# highlight cell
tile.setBrush(self.mark_brush)
return False
else:
self.toggle(tile)
if self.toggle_start:
# remove highlight
if self.toggle_start:
self.toggle_start.setBrush(self.no_mark)
self.toggle_start = None
return False
#
def toggle(self, tile):
val = '' if tile.value() else MARK
tile.setText(val)
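        # Track deviation from the initial state (value0): a cell toggled back
        # to its original value is removed from the change set again.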
if val:
if tile.tag in self.value0:
self.changes_discard(tile.tag)
else:
self.changes_add(tile.tag)
else:
if tile.tag in self.value0:
self.changes_add(tile.tag)
else:
self.changes_discard(tile.tag)
#
def changes_discard(self, tag):
if self._changes:
self._changes.discard(tag)
if not self._changes:
self._gview.set_changed(False)
#
def changes_add(self, tag):
if not self._changes:
self._gview.set_changed(True)
self._changes.add(tag)
#
def changes(self):
return list(self._changes)
#
def data(self):
"""Return choice data as a list of "non-chosen" subject lists.
[(pid, [sid, ...]), ... ]
Also pupils with empty lists are included.
"""
clist = []
y = 0
for row in self.toggles:
x = 0
slist = []
for sid in self.sids:
if row[x].value():
slist.append(sid)
x += 1
clist.append((self.pids[y], slist))
y += 1
return clist
| 36.490842 | 76 | 0.511443 |
6e74495ac01d11fb500db642fc48819334b6af0a
| 140 |
py
|
Python
|
k8s/the-project/kubeless/ok-func.py
|
cjimti/mk
|
b303e147da77776baf5fee337e356ebeccbe2c01
|
[
"MIT"
] | 1 |
2019-04-18T09:52:48.000Z
|
2019-04-18T09:52:48.000Z
|
k8s/the-project/kubeless/ok-func.py
|
cjimti/mk
|
b303e147da77776baf5fee337e356ebeccbe2c01
|
[
"MIT"
] | null | null | null |
k8s/the-project/kubeless/ok-func.py
|
cjimti/mk
|
b303e147da77776baf5fee337e356ebeccbe2c01
|
[
"MIT"
] | null | null | null |
import requests
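# Kubeless-style handler: performs a GET against the in-cluster "ok" service
# and returns the response body.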
def ok(event, context):
url = "http://ok:8080/"
response = requests.request("GET", url)
return response.text
| 15.555556 | 43 | 0.65 |
95a163ba2b23c18ae5bb7535ab4caa4e069308b6
| 144 |
py
|
Python
|
bolt/core/exceptions.py
|
ph7vc/CL4M-B0T
|
e992cf63b1215ea7c241cab94edc251653dbaed7
|
[
"MIT"
] | 9 |
2019-02-17T06:33:14.000Z
|
2021-10-05T02:19:00.000Z
|
bolt/core/exceptions.py
|
ns-phennessy/Bolt
|
e992cf63b1215ea7c241cab94edc251653dbaed7
|
[
"MIT"
] | 28 |
2019-02-10T07:48:05.000Z
|
2021-12-20T00:15:37.000Z
|
bolt/core/exceptions.py
|
ph7vc/CL4M-B0T
|
e992cf63b1215ea7c241cab94edc251653dbaed7
|
[
"MIT"
] | 4 |
2015-03-13T03:58:55.000Z
|
2015-05-27T08:29:46.000Z
|
class InvalidConfigurationError(Exception):
pass
class InvalidBotToken(Exception):
pass
class InvalidBotPlugin(Exception):
pass
| 13.090909 | 43 | 0.763889 |
c26b881427d152a0f3576dc1d7e1e0a52917ad82
| 8,165 |
py
|
Python
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 17 |
2020-11-20T15:58:02.000Z
|
2022-02-06T19:18:20.000Z
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 3 |
2021-02-17T13:47:44.000Z
|
2021-10-14T13:53:15.000Z
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 6 |
2020-11-23T09:51:26.000Z
|
2022-02-11T13:46:57.000Z
|
"""Utilities to help building Docker images."""
import argparse
import os
import subprocess
from typing import List, Optional
from universal_build import build_utils
FLAG_DOCKER_IMAGE_PREFIX = "docker_image_prefix"
def parse_arguments(
input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None
) -> dict:
"""Parses all arguments and returns a sanitized & augmented list of arguments.
Sanitized means that, for example, the version is already checked and set depending on our build guidelines.
    If arguments are not valid, the script exits.
Args:
input_args (List[str], optional): List of arguments that are used instead of the arguments passed to the process. Defaults to `None`.
        argument_parser (argparse.ArgumentParser, optional): An argument parser that is passed as a parent parser to the default ArgumentParser so that additional flags can be used besides the default ones.
Returns:
        dict: The parsed default arguments that are already checked for validity.
"""
if argument_parser is None:
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"--" + FLAG_DOCKER_IMAGE_PREFIX.replace("_", "-"),
help="Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. When leaving blank, the default Dockerhub Repository is used.",
required=False,
default="",
)
return build_utils.parse_arguments(
input_args=input_args, argument_parser=argument_parser
)
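# Usage sketch (flag name derived from FLAG_DOCKER_IMAGE_PREFIX above):
#   args = parse_arguments(["--docker-image-prefix", "mltooling/"])
#   prefix = args[FLAG_DOCKER_IMAGE_PREFIX]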
def check_image(
image: str, trivy: bool = True, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
"""Run vulnerability checks on Dockerimage.
Args:
image (str): The name of the docker image to check.
trivy (bool, optional): Activate trivy vulnerability check. Defaults to `True`.
exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.
"""
build_utils.log("Run vulnerability checks on docker image:")
if trivy and build_utils.command_exists("trivy", exit_on_error=exit_on_error):
return build_utils.run(
f"trivy image --timeout=20m0s --exit-code 1 --severity HIGH,CRITICAL {image}",
exit_on_error=exit_on_error,
)
return subprocess.CompletedProcess(args="", returncode=-1, stdout="", stderr="")
# TODO: Implement dockle container scan
def lint_dockerfile(
hadolint: bool = True, dockerfile: str = "Dockerfile", exit_on_error: bool = True
) -> None:
"""Run hadolint on the Dockerfile.
Args:
hadolint (bool, optional): Activate hadolint dockerfile linter. Defaults to `True`.
        dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` will be used.
exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.
"""
build_utils.log("Run linters and style checks:")
if hadolint and build_utils.command_exists("hadolint", exit_on_error=exit_on_error):
config_file_arg = ""
if os.path.exists(".hadolint.yml"):
config_file_arg = "--config=.hadolint.yml"
build_utils.run(
f"hadolint {config_file_arg} {dockerfile}", exit_on_error=exit_on_error
)
def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
"""Get a valid versioned image name.
Args:
name (str): Name of the docker image.
tag (str): Version to use for the tag.
image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
Returns:
str: a valid docker image name based on: prefix/name:tag
"""
versioned_tag = name.strip() + ":" + tag.strip()
if image_prefix:
versioned_tag = image_prefix.strip().rstrip("/") + "/" + versioned_tag
return versioned_tag
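# e.g. get_image_name("workspace", "0.2.0", "mltooling/") -> "mltooling/workspace:0.2.0"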
def build_docker_image(
name: str,
version: str,
build_args: str = "",
docker_image_prefix: str = "",
dockerfile: Optional[str] = None,
additional_build_args: str = "",
exit_on_error: bool = True,
) -> subprocess.CompletedProcess:
"""Build a docker image from a Dockerfile in the working directory.
Args:
name (str): Name of the docker image.
version (str): Version to use as tag.
build_args (str, optional): Add additional build arguments for docker build.
docker_image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
        dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` will be used.
exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.
Returns:
        subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker build ...` command.
"""
# Check if docker exists on the system
build_utils.command_exists("docker", exit_on_error=exit_on_error)
versioned_tag = get_image_name(name=name, tag=version)
latest_tag = get_image_name(name=name, tag="latest")
dockerfile_command = ""
if dockerfile:
dockerfile_command = " -f " + dockerfile
completed_process = build_utils.run(
"docker build "
+ dockerfile_command
+ "-t "
+ versioned_tag
+ " -t "
+ latest_tag
+ " "
+ build_args
+ " ./",
exit_on_error=exit_on_error,
)
if completed_process.returncode > 0:
build_utils.log(f"Failed to build Docker image {versioned_tag}")
return completed_process
if docker_image_prefix:
remote_versioned_tag = get_image_name(
name=name, tag=version, image_prefix=docker_image_prefix
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_versioned_tag,
exit_on_error=exit_on_error,
)
return completed_process
def release_docker_image(
name: str, version: str, docker_image_prefix: str, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
"""Push a Docker image to a repository.
Args:
name (str): The name of the image. Must not be prefixed!
version (str): The tag used for the image.
docker_image_prefix (str): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.
Returns:
subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker push ...` command.
"""
# Check if docker exists on the system
build_utils.command_exists("docker", exit_on_error=exit_on_error)
if not docker_image_prefix:
build_utils.log(
"The flag --docker-image-prefix cannot be blank when pushing a Docker image."
)
build_utils.exit_process(build_utils.EXIT_CODE_GENERAL)
versioned_tag = get_image_name(name=name, tag=version)
remote_versioned_tag = get_image_name(
name=name, tag=version, image_prefix=docker_image_prefix
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_versioned_tag,
exit_on_error=exit_on_error,
)
completed_process = build_utils.run(
"docker push " + remote_versioned_tag, exit_on_error=exit_on_error
)
if completed_process.returncode > 0:
build_utils.log(f"Failed to release Docker image {name}:{version}")
# Only push version with latest tag if no suffix is added (pre-release)
if "-" not in version:
remote_latest_tag = get_image_name(
name=name, tag="latest", image_prefix=docker_image_prefix
)
build_utils.log(
"Release Docker image with latest tag as well: " + remote_latest_tag
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_latest_tag,
exit_on_error=exit_on_error,
)
build_utils.run("docker push " + remote_latest_tag, exit_on_error=exit_on_error)
return completed_process
| 37.113636 | 205 | 0.679731 |
c2bbc6212ba14cce222e1171cae69fdb2905ea98
| 727 |
py
|
Python
|
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | 1 |
2021-08-28T15:06:10.000Z
|
2021-08-28T15:06:10.000Z
|
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | null | null | null |
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | null | null | null |
import os
from subprocess import run
import pyperclip
import webbrowser
from urllib import parse
location = 'production'
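# For every file under <location>/<folder>: open the matching iGEM wiki template
# edit page in the browser and copy the file's content to the clipboard for
# manual pasting.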
def runOnSingleFolder(folder):
file_list = os.listdir(os.path.join(location, folder))
for file in file_list:
file_noextend = file[:-(len(folder) + 1)]
url = f'https://2021.igem.org/wiki/index.php?title=Template:BNUZ-China/{folder}/{parse.quote(file_noextend)}&action=edit'
webbrowser.open(url)
print(url)
with open(os.path.join(location, folder, file), encoding='utf-8') as f:
content = f.read()
pyperclip.copy(content)
        print('The corresponding JS code has been copied; paste it into the opened page, then press Enter when done')
input()
runOnSingleFolder('js')
runOnSingleFolder('css')
| 29.08 | 129 | 0.672627 |
66bccd1b00412b945cbbdb0f6a0be3ab3a3ef37f
| 158 |
py
|
Python
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 9,908 |
2020-06-06T01:06:50.000Z
|
2022-03-31T21:22:57.000Z
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 423 |
2020-05-30T11:44:37.000Z
|
2022-03-18T03:15:30.000Z
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 714 |
2020-06-09T20:24:41.000Z
|
2022-03-29T15:28:53.000Z
|
import subprocess
from sys import exit
result = subprocess.check_output(["ciphey", "-q", "-t 'hello'"])
if "hello" in result:
exit(0)
else:
exit(1)
| 15.8 | 64 | 0.651899 |
dd788c7b5bde6a0a3088e641302680a262892fc0
| 943 |
py
|
Python
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
# condition to be cousin: (1) diff.parents (2) same level
stack=[(root, 0, -1)]
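        # BFS over (node, depth, parent) triples; list.pop(0) dequeues from the
        # front, so `stack` behaves as a queue despite its name.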
xlevel, ylevel = -1, -1
xparent, yparent = -1, -1
while(stack):
cur, depth, parent = stack.pop(0)
if cur.val==x:
xlevel, xparent = depth, parent
if cur.val==y:
ylevel, yparent = depth, parent
if cur.left:
stack.append((cur.left, depth+1, cur.val))
if cur.right:
stack.append((cur.right, depth+1, cur.val))
        return xlevel == ylevel and xparent != yparent
| 36.269231 | 74 | 0.520679 |
6613dec8d628000fe7b472846f82eac73bd8f3ea
| 49,047 |
py
|
Python
|
autojail/config/memory.py
|
ekut-es/autojail
|
bc16e40e6df55c0a28a3059715851ffa59b14ba8
|
[
"MIT"
] | 6 |
2020-08-12T08:16:15.000Z
|
2022-03-05T02:25:53.000Z
|
autojail/config/memory.py
|
ekut-es/autojail
|
bc16e40e6df55c0a28a3059715851ffa59b14ba8
|
[
"MIT"
] | 1 |
2021-03-30T10:34:51.000Z
|
2021-06-09T11:24:00.000Z
|
autojail/config/memory.py
|
ekut-es/autojail
|
bc16e40e6df55c0a28a3059715851ffa59b14ba8
|
[
"MIT"
] | 1 |
2021-11-21T09:30:58.000Z
|
2021-11-21T09:30:58.000Z
|
import copy
import logging
import math
import sys
from collections import defaultdict
from functools import reduce
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import tabulate
from ortools.sat.python import cp_model
from ..model import (
Board,
CellConfig,
DeviceMemoryRegion,
HypervisorMemoryRegion,
JailhouseConfig,
MemoryRegion,
MemoryRegionData,
ShMemNetRegion,
)
from ..model.datatypes import HexInt
from ..model.parameters import GenerateConfig, GenerateParameters, ScalarChoice
from ..utils import get_overlap
from .passes import BasePass
class MemoryAllocationInfeasibleException(Exception):
pass
class AllocatorSegment:
def __init__(
self,
name: str = "unnamed",
alignment: int = 2 ** 12,
shared_regions: Optional[
Dict[
str,
List[
Union[
MemoryRegion, DeviceMemoryRegion, HypervisorMemoryRegion
]
],
]
] = None,
) -> None:
self.name = name
self.shared_regions: Optional[
Dict[
str,
List[
Union[
MemoryRegion, DeviceMemoryRegion, HypervisorMemoryRegion
]
],
]
] = defaultdict(list)
if shared_regions:
self.shared_regions.update(shared_regions)
self.alignment = alignment
self.constraint: Optional[MemoryConstraint] = None
@property
def physical_start_addr(self):
        key = next(iter(self.shared_regions))
return self.shared_regions[key][0].physical_start_addr
@property
def size(self):
key = list(self.shared_regions)[0]
return sum(map(lambda r: r.size, self.shared_regions[key]))
class MemoryConstraint(object):
"""Implements a generic constraint for AllocatorSegments"""
def __init__(
self, size: int, virtual: bool, start_addr: int = None
) -> None:
self.size = size
self.virtual = virtual
self.start_addr: Optional[int] = start_addr
self.address_range: Optional[Tuple[int, int]] = None
# Addresses must be aligned such that
# addr % self.alignment == 0
self.alignment: Optional[int] = None
# Constraint for Memory regions where physical == virtual address
# E.g. mem loadable in root cell
self.equal_constraint: Optional["MemoryConstraint"] = None
# Solver Interval Variable
self.bound_vars: Optional[Tuple[Any, Any]] = None
# Values for the allocated range after constraint solving
self.allocated_range: Optional[Tuple[int, int]] = None
# allow arbitrary actions upon resolving a constraint
# this method is called iff, the solver found a valid
# solution and assigned start_addr
# Parameters:
# - self: MemoryConstraint
self.resolved: Optional[Callable[[MemoryConstraint], None]] = None
def __str__(self):
ret = ""
if self.start_addr is not None:
ret += f"addr: {hex(self.start_addr)} "
if self.address_range:
ret += f"range: {hex(self.address_range[0])}-{hex(self.address_range[1])} "
if self.alignment:
ret += f"alignment: {self.alignment} "
        if self.allocated_range:
            ret += f"allocated: {hex(self.allocated_range[0])}-{hex(self.allocated_range[1])} "
ret += f"size: {self.size} virtual: {self.virtual}"
return ret
# Returns a constraint that satisfies both
# <self> and <other>, if possible
# Fails otherwise
def merge(self, other):
assert (
self.virtual == other.virtual
and "Unable to merge constraints for physical and virtual addresses"
)
assert (
self.size == other.size
and "Unable to merge constraints with different size"
)
assert (
self.start_addr == other.start_addr
and "Unbable to merge constraints with different start addresses"
)
alignment = self.alignment
if other.alignment:
if alignment:
                alignment = (self.alignment * other.alignment) // math.gcd(
self.alignment, other.alignment
)
else:
alignment = other.alignment
resolved = self.resolved
if other.resolved:
if resolved:
def callback(mc: MemoryConstraint):
assert self.resolved
assert other.resolved
self.resolved(mc)
other.resolved(mc)
resolved = callback
else:
resolved = other.resolved
        mc = MemoryConstraint(self.size, self.virtual)
        mc.start_addr = self.start_addr
        mc.alignment = alignment
        mc.resolved = resolved
        return mc
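    # Example: merging two constraints with alignments 4096 and 8192 yields
    # lcm(4096, 8192) = 8192 as the combined alignment.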
class NoOverlapConstraint(object):
"""Implements a generic no-overlap constraint"""
def __init__(self) -> None:
self.constraints: List[MemoryConstraint] = []
def add_memory_constraint(self, mc: MemoryConstraint) -> None:
self.constraints.append(mc)
def __str__(self):
return str(self.__dict__)
class CPMemorySolver(object):
def __init__(
self,
constraints: List[NoOverlapConstraint],
physical_domain: cp_model.Domain,
virtual_domain: cp_model.Domain,
):
self.constraints = constraints
self.model = cp_model.CpModel()
self.physical_domain = physical_domain
self.virtual_domain = virtual_domain
self.ivars: Dict[cp_model.IntervalVar, MemoryConstraint] = dict()
self.vars: Dict[
cp_model.IntervalVar, Tuple[cp_model.IntVar, cp_model.IntVar]
] = dict()
self._build_cp_constraints()
def solve(self):
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
status = solver.Solve(self.model)
if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
for ivar, mc in self.ivars.items():
lower, upper = self.vars[ivar]
mc.allocated_range = solver.Value(lower), solver.Value(upper)
else:
print("Memory allocation infeasible")
raise MemoryAllocationInfeasibleException()
def _build_cp_constraints(self):
equal_pairs = []
for overlap_index, no_overlap in enumerate(self.constraints):
cp_no_overlap = []
for constr_index, constr in enumerate(no_overlap.constraints):
lower = None
upper = None
constr_name = f"constr_{overlap_index}_{constr_index}"
if constr.start_addr is not None:
lower = self.model.NewConstant(constr.start_addr)
upper = self.model.NewConstant(
constr.start_addr + constr.size
)
else:
if constr.address_range:
l_addr, u_addr = constr.address_range
lower = self.model.NewIntVar(
l_addr, u_addr, f"{constr_name}_lower"
)
else:
domain = self.physical_domain
if constr.virtual:
domain = self.virtual_domain
lower = self.model.NewIntVarFromDomain(
domain, f"{constr_name}_lower"
)
if constr.address_range:
l_addr, u_addr = constr.address_range
upper = self.model.NewIntVar(
l_addr, u_addr, f"{constr_name}_upper"
)
else:
domain = self.physical_domain
if constr.virtual:
domain = self.virtual_domain
upper = self.model.NewIntVarFromDomain(
domain, f"{constr_name}_upper"
)
ivar = self.model.NewIntervalVar(
lower, constr.size, upper, f"{constr_name}_ivar"
)
print(lower, constr.size, upper)
constr.bound_vars = (lower, upper)
if constr.alignment:
self.model.AddModuloEquality(0, lower, constr.alignment)
if constr.equal_constraint:
equal_pairs.append((constr, constr.equal_constraint))
cp_no_overlap.append(ivar)
self.ivars[ivar] = constr
self.vars[ivar] = (lower, upper)
self.model.AddNoOverlap(cp_no_overlap)
for first, second in equal_pairs:
self.model.Add(first.bound_vars[0] == second.bound_vars[0])
self.model.Add(first.bound_vars[1] == second.bound_vars[1])
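# Minimal standalone sketch of the CP-SAT interval pattern used above:
#   model = cp_model.CpModel()
#   start = model.NewIntVar(0, 100, "start")
#   end = model.NewIntVar(0, 100, "end")
#   iv = model.NewIntervalVar(start, 10, end, "iv")  # enforces end == start + 10
#   model.AddNoOverlap([iv, ...])                    # intervals pairwise disjoint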
class AllocateMemoryPass(BasePass):
"""Implements a simple MemoryAllocator for AutoJail"""
def __init__(self) -> None:
self.logger = logging.getLogger("autojail")
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
self.root_cell: Optional[CellConfig] = None
self.root_cell_id: Optional[str] = None
self.unallocated_segments: List[AllocatorSegment] = []
self.allocated_regions: List[MemoryRegionData] = []
self.per_region_constraints: Dict[str, MemoryConstraint] = dict()
# data structure for creating and handling generic
# constraints
self.physical_domain: cp_model.Domain = None
self.virtual_domain: cp_model.Domain = None
self.global_no_overlap = NoOverlapConstraint()
self.no_overlap_constraints: Dict[
str, NoOverlapConstraint
] = defaultdict(NoOverlapConstraint)
self.memory_constraints: Dict[
MemoryConstraint, AllocatorSegment
] = dict()
def _iter_constraints(self, f_no_overlap, f_mc):
for cell_name, no_overlap in self.no_overlap_constraints.items():
if not f_no_overlap(cell_name, no_overlap):
continue
for mc in no_overlap.constraints:
f_mc(cell_name, mc)
def _dump_constraints(self):
constraint_tables = {}
def f_no_overlap(
cell_name: str, no_overlap: NoOverlapConstraint
) -> bool:
constraint_tables[cell_name] = []
return True
def f_mc(cell_name: str, mc: MemoryConstraint) -> None:
constraint_tables[cell_name].append(
[
hex(mc.start_addr) if mc.start_addr is not None else "-",
hex(mc.address_range[0]) + "-" + hex(mc.address_range[1])
if mc.address_range
else "-",
str(mc.size) if mc.size is not None else "-",
str(mc.alignment) if mc.alignment else "-",
str(mc.virtual),
"yes" if mc.equal_constraint else "-",
str(mc.resolved) if mc.resolved else "-",
]
)
self._iter_constraints(f_no_overlap, f_mc)
self.logger.info("")
self.logger.info("Memory Constraints:")
for cell_name, constraints in constraint_tables.items():
self.logger.info("Cell: %s", cell_name)
formatted = tabulate.tabulate(
constraints,
headers=[
"Start Address",
"Start Address Range",
"Size",
"Alignment",
"Virtual?",
"Equal?",
"Resolved callback",
],
)
self.logger.info(formatted)
self.logger.info("")
def _check_constraints(self):
def f_no_overlap(cell_name, no_overlap):
full_regions = []
def insert_region(region):
o_start, o_end = region
for (start, end) in full_regions:
if (
(o_start <= start and start <= o_end)
or (o_start <= end and end <= o_end)
or (start <= o_start and o_start <= end)
or (start <= o_end and o_end <= end)
):
print(
f"Regions overlap for {cell_name}: (0x{start:x}, 0x{end:x}) and (0x{o_start:x}, 0x{o_end:x})"
)
if mc not in self.memory_constraints:
continue
seg = self.memory_constraints[mc]
print("Affected memory cells:")
for sharer in seg.shared_regions.keys():
print(f"\t{sharer}")
full_regions.append(region)
for mc in no_overlap.constraints:
if mc.start_addr is not None:
region = (mc.start_addr, mc.start_addr + mc.size - 1)
insert_region(region)
return False
def f_mc(cell_name, mc):
pass
self._iter_constraints(f_no_overlap, f_mc)
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.logger.info("Memory Allocator")
self.board = board
self.config = config
self.root_cell = None
for id, cell in self.config.cells.items():
if cell.type == "root":
self.root_cell = cell
self.root_cell_id = id
break
vmem_size = 2 ** 32
if self.board.virtual_address_bits > 32:
vmem_size = 2 ** (self.board.virtual_address_bits - 1)
self.virtual_domain = cp_model.Domain(0, vmem_size)
self._build_allocation_domain()
self.logger.info(
"Physical Memory Domain: %s",
str(self.physical_domain.FlattenedIntervals()),
)
self.logger.info(
"Virtual Memory domain: %s",
str(self.virtual_domain.FlattenedIntervals()),
)
self.no_overlap_constraints["__global"] = self.global_no_overlap
self.unallocated_segments = self._build_unallocated_segments()
self._lift_loadable()
self._preallocate_vpci()
self.logger.info("")
self.logger.info("Unallocated physical segments: ")
table = [
[
s.name,
s.size,
len(s.shared_regions if s.shared_regions else []),
",".join(s.shared_regions.keys() if s.shared_regions else []),
]
for s in self.unallocated_segments
]
self.logger.info(
tabulate.tabulate(
table,
headers=["Name", "Size (Byte)", "# Subregions", "Sharers"],
)
)
for seg in self.unallocated_segments:
assert seg.size > 0
assert seg.shared_regions
mc_global = None
for sharer, regions in seg.shared_regions.items():
mc_seg = seg.constraint
mc_local = MemoryConstraint(seg.size, True)
if mc_seg and mc_seg.alignment:
mc_local.alignment = mc_seg.alignment
else:
if regions[0].virtual_start_addr is None:
mc_local.alignment = seg.alignment
fst_region = regions[0]
if fst_region.virtual_start_addr is not None:
if mc_seg and mc_seg.start_addr and mc_seg.virtual:
assert (
mc_seg.start_addr == fst_region.virtual_start_addr
and "Invalid state detected: start addresses must be equal"
)
mc_local.start_addr = fst_region.virtual_start_addr
elif mc_seg and mc_seg.start_addr and mc_seg.virtual:
mc_local.start_addr = mc_seg.start_addr
if mc_seg and mc_seg.virtual:
mc_local.resolved = mc_seg.resolved
if not mc_global:
mc_global = copy.deepcopy(mc_local)
mc_global.virtual = False
mc_global.start_addr = None
if fst_region.physical_start_addr is not None:
if mc_seg and mc_seg.start_addr and not mc_seg.virtual:
assert (
mc_seg.start_addr
== fst_region.virtual_start_addr
and "Invalid state detected: start addresses must be equal"
)
mc_global.start_addr = fst_region.physical_start_addr
elif mc_seg and mc_seg.start_addr and not mc_seg.virtual:
mc_global.start_addr = mc_seg.start_addr
if mc_seg and not mc_seg.virtual:
mc_global.resolved = mc_seg.resolved
if mc_global.start_addr and mc_global.size:
print(
f"Adding global no-overlapp (shared): [0x{mc_global.start_addr:x}, 0x{mc_global.start_addr + mc_global.size:x}]"
)
self.global_no_overlap.add_memory_constraint(mc_global)
self.memory_constraints[mc_global] = seg
# Add physical == virtual constraint for MEM_LOADABLEs in root cell
if sharer == self.root_cell_id:
is_loadable = False
for shared_regions in seg.shared_regions.values():
for shared_region in shared_regions:
if isinstance(shared_region, MemoryRegionData):
for flag in shared_region.flags:
if flag == "MEM_LOADABLE":
is_loadable = True
if is_loadable:
mc_local.equal_constraint = mc_global
self.no_overlap_constraints[sharer].add_memory_constraint(
mc_local
)
self.memory_constraints[mc_local] = seg
# Add virtually reserved segments
for cell_name, cell in self.config.cells.items():
assert cell.memory_regions is not None
for memory_region in cell.memory_regions.values():
assert memory_region is not None
if isinstance(memory_region, HypervisorMemoryRegion):
continue
if isinstance(memory_region, ShMemNetRegion):
continue
assert isinstance(memory_region, MemoryRegionData)
if (
memory_region.virtual_start_addr is not None
and memory_region.physical_start_addr is not None
):
if memory_region.allocatable:
continue
assert memory_region.size is not None
memory_constraint = MemoryConstraint(
size=int(memory_region.size),
virtual=True,
start_addr=memory_region.virtual_start_addr,
)
self.no_overlap_constraints[
cell_name
].add_memory_constraint(memory_constraint)
self._add_gic_constraints()
self._dump_constraints()
solver = CPMemorySolver(
list(self.no_overlap_constraints.values()),
self.physical_domain,
self.virtual_domain,
)
try:
solver.solve()
except MemoryAllocationInfeasibleException:
self._check_constraints()
sys.exit(-1)
for cell_name, no_overlap_constr in self.no_overlap_constraints.items():
for constr in no_overlap_constr.constraints:
if not constr.allocated_range:
print(constr, "has not been allocated")
continue
(start, _) = constr.allocated_range
if constr.resolved:
constr.resolved(constr)
if constr not in self.memory_constraints:
continue
seg = self.memory_constraints[constr]
if cell_name == "__global":
assert seg.shared_regions
for _, regions in seg.shared_regions.items():
for region in regions:
if region.physical_start_addr is None:
region.physical_start_addr = HexInt(start)
else:
assert seg.shared_regions
assert constr.virtual
for region in seg.shared_regions[cell_name]:
if region.virtual_start_addr is None:
region.virtual_start_addr = HexInt(start)
self._remove_allocatable()
return self.board, self.config
def _add_gic_constraints(self):
interrupt_ranges: List[Tuple[int, int]] = []
for interrupt_controller in self.board.interrupt_controllers:
if interrupt_controller.gic_version == 2:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x1000)
)
interrupt_ranges.append(
(interrupt_controller.gicc_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gich_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gicv_base, 0x2000)
)
elif interrupt_controller.gic_version == 3:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x10000)
)
interrupt_ranges.append(
(interrupt_controller.gicr_base, 0x20000)
)
for name, constraint in self.no_overlap_constraints.items():
for interrupt_range in interrupt_ranges:
mc = MemoryConstraint(
size=interrupt_range[1],
start_addr=interrupt_range[0],
virtual=False if name == "__global" else True,
)
constraint.add_memory_constraint(mc)
def _lift_loadable(self):
root_cell = self.root_cell
for cell_name, cell in self.config.cells.items():
if cell.type == "root":
continue
for name, region in cell.memory_regions.items():
if region.flags and "MEM_LOADABLE" in region.flags:
root_region_name = f"{name}@{cell_name}"
print("Adding region:", root_region_name, "to root cell")
copy_region = copy.deepcopy(region)
copy_region.flags.remove("MEM_LOADABLE")
if "MEM_EXECUTE" in copy_region.flags:
copy_region.flags.remove("MEM_EXECUTE")
if "MEM_DMA" in copy_region.flags:
copy_region.flags.remove("MEM_DMA")
                    # FIXME: is it really true that the MEM_LOADABLE regions must be the same at their respective memory region?
copy_region.virtual_start_addr = (
copy_region.physical_start_addr
)
root_cell.memory_regions[root_region_name] = copy_region
for seg in self.unallocated_segments:
if cell_name not in seg.shared_regions:
continue
if region not in seg.shared_regions[cell_name]:
continue
seg.shared_regions["root"].append(copy_region)
def _build_allocation_domain(self) -> None:
assert self.root_cell is not None
assert self.root_cell.memory_regions is not None
assert self.board is not None
start = None
end = 0
allocatable_regions = []
for region in self.board.memory_regions.values():
assert region is not None
if isinstance(region, MemoryRegionData) and region.allocatable:
assert region.physical_start_addr is not None
assert region.size is not None
allocatable_regions.append(region)
tmp_start = region.physical_start_addr
tmp_end = region.physical_start_addr + region.size
if start is None:
start = tmp_start
if tmp_start < start:
start = tmp_start
if tmp_end > end:
end = tmp_end
allocatable_regions.sort(
key=lambda r: r.physical_start_addr
if r.physical_start_addr is not None
else 0
)
holes: List[List[int]] = []
for i in range(0, len(allocatable_regions) - 1):
r0 = allocatable_regions[i]
r1 = allocatable_regions[i + 1]
assert r0.physical_start_addr is not None and r0.size is not None
assert r1.physical_start_addr is not None
r0_end = r0.physical_start_addr + r0.size
r1_start = r1.physical_start_addr
if r0_end != r1_start:
holes.append([r0_end, r1_start])
# Physical domain spans the entire range from the first allocatable memory region
        # to the end of the last one. Any holes in that range are accommodated using
# constant interval constraints
def remove_hole(start, end):
try:
holes.remove([start, end])
except ValueError:
pass
self.physical_domain = cp_model.Domain.FromIntervals([[start, end]])
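        # e.g. Domain.FromIntervals([[0x40000000, 0xC0000000]]) permits any
        # address within that single span; the real bounds come from the
        # board's allocatable regions gathered above.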
# Make sure all pre-allocated regions part of a cell have a corresponding
# constraint (technically, we only need constraints for those regions that
# overlapp with the allocatable range/physical domain)
non_alloc_ranges: List[List[int]] = []
assert self.config
for cell in self.config.cells.values():
assert cell.memory_regions
for r in cell.memory_regions.values():
if not isinstance(r, ShMemNetRegion) and not isinstance(
r, MemoryRegion
):
continue
if r.physical_start_addr is not None:
assert r.size is not None
end = r.physical_start_addr + r.size
non_alloc_range = [r.physical_start_addr, end]
if non_alloc_range in non_alloc_ranges:
continue
if not self.physical_domain.Contains(
non_alloc_range[0]
) and not self.physical_domain.Contains(non_alloc_range[1]):
continue
non_alloc_ranges.append(non_alloc_range)
remove_hole(r.physical_start_addr, end)
mc = MemoryConstraint(r.size, False, r.physical_start_addr)
self.global_no_overlap.add_memory_constraint(mc)
# fill remaining holes in between allocatable regions
for hole in holes:
s, e = hole
size = e - s
mc = MemoryConstraint(size, False, s)
self.global_no_overlap.add_memory_constraint(mc)
def _remove_allocatable(self):
"""Finally remove allocatable memory regions from cells"""
assert self.config is not None
for cell in self.config.cells.values():
delete_list = []
for name, region in cell.memory_regions.items():
if isinstance(region, MemoryRegionData):
if region.allocatable:
delete_list.append(name)
for name in delete_list:
del cell.memory_regions[name]
def _build_unallocated_segments(
self, key: Callable = lambda x: x.physical_start_addr
) -> List[AllocatorSegment]:
assert self.config
assert self.config.cells
ana = UnallocatedOrSharedSegmentsAnalysis(
self.root_cell,
self.config.cells,
self.logger,
self.per_region_constraints,
self.physical_domain,
key,
)
ana.run()
unallocated = ana.unallocated
assert unallocated
return unallocated
def _preallocate_vpci(self):
"""Preallocate a virtual page on all devices"""
assert self.config is not None
if self.root_cell and self.root_cell.platform_info:
            # see hypervisor/pci.c:850
end_bus = self.root_cell.platform_info.pci_mmconfig_end_bus
vpci_size = (end_bus + 2) * 256 * 4096
if self.root_cell.platform_info.pci_mmconfig_base:
for constraints in self.no_overlap_constraints.values():
mc = MemoryConstraint(
vpci_size,
True,
self.root_cell.platform_info.pci_mmconfig_base,
)
constraints.add_memory_constraint(mc)
else:
def callback(mc: MemoryConstraint):
assert mc.allocated_range
assert self.root_cell
assert self.root_cell.platform_info
assert self.root_cell.memory_regions
physical_start_addr, _ = mc.allocated_range
self.root_cell.platform_info.pci_mmconfig_base = HexInt(
physical_start_addr
)
self.logger.info(
"Print resolved pci_mmconfig %s",
hex(physical_start_addr),
)
# Allocate vpci physically
last_mc = MemoryConstraint(
vpci_size, True
) # This is a physical constraint, but it does not need to be backed by allocatable memory
last_mc.resolved = callback
last_mc.alignment = self.board.pagesize
last_mc.address_range = (0x0, 2 ** 32 - 1)
self.no_overlap_constraints["__global"].add_memory_constraint(
last_mc
)
for cell_name in self.config.cells.keys():
mc = MemoryConstraint(vpci_size, True)
mc.equal_constraint = last_mc
self.no_overlap_constraints[
cell_name
].add_memory_constraint(mc)
mc.alignment = self.board.pagesize
last_mc = mc
class UnallocatedOrSharedSegmentsAnalysis(object):
""" Group unallocated memory regions into segments
that are allocated continuously.
Detect (un-)allocated regions that are shared
between cells
"""
def __init__(
self,
root_cell,
cells,
logger,
per_region_constraints,
physical_domain,
key=lambda x: x.physical_start_addr,
) -> None:
self.root_cell: CellConfig = root_cell
self.cells: Dict[str, CellConfig] = cells
self.logger = logger
self.key = key
self.per_region_constraints = per_region_constraints
self.physical_domain: Optional[cp_model.Domain] = physical_domain
# result store
self.unallocated: List[AllocatorSegment] = []
self.shared: Dict[str, AllocatorSegment] = {}
def _detect_shared_memio(self):
shared: Dict[
Tuple[int, int], Tuple[int, List[MemoryRegionData]]
] = defaultdict(lambda: (0, []))
for cell in self.cells.values():
for region in cell.memory_regions.values():
if not isinstance(region, MemoryRegionData):
continue
if not self.key(region) or "MEM_IO" not in region.flags:
continue
start = region.physical_start_addr
key = (start, region.size)
count, regions = shared[key]
regions.append(region)
shared[key] = (count + 1, regions)
for count, regions in shared.values():
if count > 1:
for region in regions:
region.shared = True
def _log_shared_segments(self):
self.logger.info("Shared segments:")
for name, seg in self.shared.items():
self.logger.info(f"Region: '{name}' shared by")
for cell_name in seg.shared_regions:
self.logger.info(f"\t{cell_name}")
self.logger.info("\n")
def run(self) -> None:
assert self.root_cell is not None
assert self.cells is not None
self._detect_shared_memio()
# Add cell memories
self.logger.debug("building allocatable regions")
for cell_name, cell in self.cells.items():
assert cell is not None
assert cell.memory_regions is not None
for region_name, region in cell.memory_regions.items():
if not isinstance(region, MemoryRegionData):
continue
if region.allocatable:
continue
assert self.shared is not None
if region.shared and region_name in self.shared:
current_segment = self.shared[region_name]
assert current_segment.shared_regions
current_segment.shared_regions[cell_name].append(region)
if region_name in self.per_region_constraints:
constraint = self.per_region_constraints[region_name]
if current_segment.constraint:
constraint = constraint.merge(
current_segment.constraint
)
current_segment.constraint = constraint
else:
current_segment = AllocatorSegment(
region_name, shared_regions={cell_name: [region]},
)
if region_name in self.per_region_constraints:
current_segment.constraint = self.per_region_constraints[
region_name
]
if region.physical_start_addr is None:
self.unallocated.append(current_segment)
                    # TODO: are shared regions required to have
                    # the same name across cells?
if region.shared:
self.shared[region_name] = current_segment
# Add hypervisor memories
hypervisor_memory = self.root_cell.hypervisor_memory
assert isinstance(hypervisor_memory, HypervisorMemoryRegion)
if hypervisor_memory.physical_start_addr is None:
self.unallocated.append(
AllocatorSegment(
"hypervisor_memory",
alignment=hypervisor_memory.size, # FIXME: this is too much alignment
shared_regions={"hypervisor": [hypervisor_memory]},
)
)
self._log_shared_segments()
class MergeIoRegionsPass(BasePass):
""" Merge IO regions in root cell that are at most n kB apart.
n defaults to 64 kb
"""
def __init__(
self,
set_params: Optional[GenerateConfig],
gen_params: Optional[GenerateParameters],
) -> None:
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
self.root_cell: Optional[CellConfig] = None
self.logger = logging.getLogger("autojail")
self.max_dist = 64 * 1024
if set_params:
self.max_dist = set_params.mem_io_merge_threshold
if gen_params:
threshold_choice = ScalarChoice()
threshold_choice.lower = 1024
threshold_choice.upper = 64 * 1024 * 1024
threshold_choice.step = 1024
threshold_choice.integer = True
threshold_choice.log = True
gen_params.mem_io_merge_threshold = threshold_choice
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.logger.info("Merge IO Regions")
self.board = board
self.config = config
for cell in self.config.cells.values():
if cell.type == "root":
self.root_cell = cell
assert self.root_cell
assert self.root_cell.memory_regions
shared_regions_ana = UnallocatedOrSharedSegmentsAnalysis(
self.root_cell,
self.config.cells,
self.logger,
dict(),
None,
key=lambda region: region.physical_start_addr,
)
shared_regions_ana.run()
def get_io_regions(
regions: Dict[
str,
Union[str, ShMemNetRegion, MemoryRegion, DeviceMemoryRegion],
]
) -> List[Tuple[str, Union[DeviceMemoryRegion, MemoryRegion]]]:
return list(
[
(name, r)
for name, r in regions.items()
if isinstance(r, MemoryRegionData) and "MEM_IO" in r.flags
]
)
regions: Sequence[Tuple[str, MemoryRegionData]] = get_io_regions(
self.root_cell.memory_regions
)
regions = sorted(
regions,
key=lambda t: t[1].physical_start_addr
if t[1].physical_start_addr is not None
else 0,
)
grouped_regions: List[List[Tuple[str, MemoryRegionData]]] = []
current_group: List[Tuple[str, MemoryRegionData]] = []
max_dist = self.max_dist
vpci_start_addr = None
vpci_end_addr = None
if (
self.root_cell.platform_info is not None
and self.root_cell.platform_info.pci_mmconfig_base is not None
and self.root_cell.platform_info.pci_mmconfig_base > 0
):
vpci_start_addr = self.root_cell.platform_info.pci_mmconfig_base
vpci_end_addr = (
vpci_start_addr
+ (self.root_cell.platform_info.pci_mmconfig_end_bus + 1)
* 256
* 4096
)
for name, r in regions:
assert r.physical_start_addr is not None
assert r.size is not None
if current_group:
r1_end = r.physical_start_addr + r.size
r1_start = r.physical_start_addr
assert current_group[-1][1].physical_start_addr is not None
assert current_group[-1][1].size is not None
assert current_group[0][1].physical_start_addr is not None
last_region_end = (
current_group[-1][1].physical_start_addr
+ current_group[-1][1].size
)
# Do not merge regions if merged regions would
# overlap with gic
gic_overlap = False
interrupt_ranges = []
for interrupt_controller in board.interrupt_controllers:
if interrupt_controller.gic_version == 2:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x1000)
)
interrupt_ranges.append(
(interrupt_controller.gicc_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gich_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gicv_base, 0x2000)
)
elif interrupt_controller.gic_version == 3:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x10000)
)
interrupt_ranges.append(
(interrupt_controller.gicr_base, 0x20000)
)
for interrupt_range in interrupt_ranges:
if (
current_group[0][1].physical_start_addr
< interrupt_range[0] + interrupt_range[1]
):
if r1_end > interrupt_range[0]:
gic_overlap = True
break
vpci_overlap = False
if vpci_start_addr is not None and vpci_end_addr is not None:
if (
get_overlap(
(r1_start, r1_end), (vpci_start_addr, vpci_end_addr)
)
> 0
):
vpci_overlap = True
if (
r1_start - last_region_end > max_dist
or gic_overlap
or vpci_overlap
):
grouped_regions.append(current_group)
if not gic_overlap and not vpci_overlap:
current_group = [(name, r)]
else:
current_group = []
else:
current_group.append((name, r))
else:
current_group.append((name, r))
if current_group:
grouped_regions.append(current_group)
self.logger.info(f"Got {len(grouped_regions)} grouped region(s):")
for group in grouped_regions:
assert group[0][1].physical_start_addr is not None
assert group[-1][1].physical_start_addr is not None
assert group[-1][1].size is not None
group_begin = group[0][1].physical_start_addr
group_end = group[-1][1].physical_start_addr + group[-1][1].size
self.logger.info(
f"Group-Begin: (0x{group_begin:x} - 0x{group_end:x})"
)
for region in group:
self.logger.info(f"\t{region}")
self.logger.info("Group-End\n")
for index, regions in enumerate(grouped_regions):
r_start = regions[0][1]
r_end = regions[-1][1]
assert r_start.physical_start_addr is not None
assert r_end.size is not None
assert r_end.physical_start_addr is not None
new_size = (
r_end.physical_start_addr + r_end.size
) - r_start.physical_start_addr
def aux(
acc: Iterable[str], t: Tuple[str, MemoryRegionData]
) -> Iterable[str]:
_, r = t
return set(acc) | set(r.flags)
init: Iterable[str] = set()
flags: List[str] = sorted(list(reduce(aux, regions, init)))
physical_start_addr = r_start.physical_start_addr
virtual_start_addr = r_start.virtual_start_addr
new_region = MemoryRegion(
size=new_size,
physical_start_addr=physical_start_addr,
virtual_start_addr=virtual_start_addr,
flags=flags,
allocatable=False,
shared=False,
)
assert self.root_cell.memory_regions
for name, _ in regions:
del self.root_cell.memory_regions[name]
self.root_cell.memory_regions[f"mmio_{index}"] = new_region
return (self.board, self.config)
class PrepareMemoryRegionsPass(BasePass):
""" Prepare memory regions by merging regions from Extracted Board Info and Cell Configuration"""
def __init__(self) -> None:
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.board = board
self.config = config
assert self.board is not None
assert self.config is not None
for cell in self.config.cells.values():
assert cell.memory_regions is not None
for region in cell.memory_regions.values():
if isinstance(region, MemoryRegionData) and region.size is None:
region.size = self.board.pagesize
if cell.type == "root":
self._prepare_memory_regions_root(cell)
return self.board, self.config
def _prepare_memory_regions_root(self, cell: CellConfig) -> None:
assert self.board is not None
assert self.board.memory_regions is not None
assert cell.memory_regions is not None
allocatable_ranges = []
for region in self.board.memory_regions.values():
if region.allocatable:
assert region.size is not None
assert region.physical_start_addr is not None
start = region.physical_start_addr
end = start + region.size
allocatable_ranges.append([start, end])
allocatable_ranges.sort(key=lambda r: r[0])
def overlaps_allocatable_region(start, end):
for r in allocatable_ranges:
            if (
                r[0] <= start <= r[1]
                or r[0] <= end <= r[1]
            ):
                return True
return False
for name, memory_region in self.board.memory_regions.items():
if memory_region.physical_start_addr is None:
continue
if memory_region.virtual_start_addr is None:
continue
if memory_region.size is None:
continue
p_start = memory_region.physical_start_addr
v_start = memory_region.virtual_start_addr
p_end = memory_region.physical_start_addr + memory_region.size
v_end = memory_region.virtual_start_addr + memory_region.size
assert p_start is not None
assert v_start is not None
assert p_end is not None
assert v_end is not None
if overlaps_allocatable_region(p_start, p_end):
continue
skip = False
for cell_region in cell.memory_regions.values():
if not isinstance(cell_region, MemoryRegionData):
continue
assert cell_region.size is not None
if cell_region.physical_start_addr is not None:
if (
p_start >= cell_region.physical_start_addr
and p_start
< cell_region.physical_start_addr + cell_region.size
):
skip = True
if (
p_end >= cell_region.physical_start_addr
and p_end
< cell_region.physical_start_addr + cell_region.size
):
skip = True
if cell_region.virtual_start_addr is not None:
if (
v_start >= cell_region.virtual_start_addr
and v_start
< cell_region.virtual_start_addr + cell_region.size
):
skip = True
if (
v_end >= cell_region.virtual_start_addr
and v_end
< cell_region.virtual_start_addr + cell_region.size
):
skip = True
if skip is True:
continue
cell.memory_regions[name] = memory_region
| 35.438584 | 140 | 0.538728 |
b1c431a1f0a698ee3cb88df0ac882e928a41cf16
| 1,133 |
py
|
Python
|
CS303/lab4-6/work/algorithm_ncs/ncs_client.py
|
Wycers/Codelib
|
86d83787aa577b8f2d66b5410e73102411c45e46
|
[
"MIT"
] | 22 |
2018-08-07T06:55:10.000Z
|
2021-06-12T02:12:19.000Z
|
CS303_Artifical-Intelligence/NCS/algorithm_ncs/ncs_client.py
|
Eveneko/SUSTech-Courses
|
0420873110e91e8d13e6e85a974f1856e01d28d6
|
[
"MIT"
] | 28 |
2020-03-04T23:47:22.000Z
|
2022-02-26T18:50:00.000Z
|
CS303/lab4-6/work/algorithm_ncs/ncs_client.py
|
Wycers/Codelib
|
86d83787aa577b8f2d66b5410e73102411c45e46
|
[
"MIT"
] | 4 |
2019-11-09T15:41:26.000Z
|
2021-10-10T08:56:57.000Z
|
import json
from algorithm_ncs import ncs_c as ncs
import argparse
parser = argparse.ArgumentParser(description="This is a NCS solver")
parser.add_argument("-c", "--config", default="algorithm_ncs/parameter.json", type=str, help="a json file that contains parameter")
parser.add_argument("-d", "--data", default="6", type=int, help="the problem dataset that need to be solved")
args = parser.parse_args()
"""
how to use it?
example:
python3 -m algorithm_ncs.ncs_client -d 12 -c algorithm_ncs/parameter.json
good luck!
"""
if __name__ == '__main__':
config_file = args.config
p = args.data
with open(config_file) as file:
        try:
            ncs_para = json.loads(file.read())
        except ValueError:
            raise Exception("not a json format file")
_lambda = ncs_para["lambda"]
r = ncs_para["r"]
epoch = ncs_para["epoch"]
    n = ncs_para["n"]
ncs_para = ncs.NCS_CParameter(tmax=300000, lambda_exp=_lambda, r=r, epoch=epoch, N=n)
print("************ start problem %d **********" % p)
ncs_c = ncs.NCS_C(ncs_para, p)
ncs_res = ncs_c.loop(quiet=False, seeds=0)
print(ncs_res)
| 29.815789 | 131 | 0.655781 |
59d8eb391e49f53fa6bd3a0f7066dcb0749c813d
| 4,373 |
py
|
Python
|
app/check.py
|
MePyDo/pygqa
|
61cde42ee815968fdd029cc5056ede3badea3d91
|
[
"MIT"
] | 3 |
2021-02-25T13:19:52.000Z
|
2021-03-03T03:46:46.000Z
|
app/check.py
|
MedPhyDO/pygqa
|
580b2c6028d2299790a38262b795b8409cbfcc37
|
[
"MIT"
] | null | null | null |
app/check.py
|
MedPhyDO/pygqa
|
580b2c6028d2299790a38262b795b8409cbfcc37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.0"
__status__ = "Prototype"
import matplotlib.pyplot as plt
from isp.plot import plotClass
import logging
logger = logging.getLogger( "MQTT" )
class ispCheckClass( plotClass ):
""" Hilfsfunktionen für alle check Module
Attributes
----------
image : instance of BaseImage
baseImage : instance of BaseImage
das zum normalisieren zu verwendende Bild
infos : dict
die infos aus self.image.infos
checkField : dict
die für die Tests zu verwendende Bildinformatioen
baseField : dict
die für das normalisieren zu verwendende Bildinformatioen
"""
def __init__( self, image=None, baseImage=None, normalize="none" ):
"""Check Klasse initialisieren
"""
#self.checkField = None
self.image = image
self.baseImage = baseImage
#self.baseField = None
self.infos = None
        # if the baseImage is present as well, normalize if requested
        if self.baseImage is not None:
            self.normalize( normalize )
        # make infos accessible through this class as well
        if self.image is not None:
            self.infos = self.image.infos
#print("ispCheckClass.__init__",self.baseImage, normalize)
def show(self):
'''
        Calls plt.show to display the generated plots
Returns
-------
None.
'''
plt.show()
def normalize( self, normalize: str="diff" ):
        '''Normalizes checkField with baseField
        Afterwards self.image.array holds the normalized data
        Parameters
        ----------
        normalize : str, optional
            Type of normalization. The default is "diff".
            - none: do not normalize
            - diff: test / open
            - prozent: (test - open) / open
Returns
-------
None.
'''
        # keep image.array as image.arrayOriginal
self.image.arrayOriginal = self.image.array.copy()
#print("### ispCheckClass.normalize", self.image, self.baseImage, normalize)
"""
if basefilename:
if self.debug:
print("---------------------------")
print("OpenImage: %s, min: %1.3f, max: %1.3f, DPMM: %1.3f, DPI: %1.3f, CAX-x: %1.3f CAX-y:%1.3f"
% (self.openfilename, np.amin(openImg.array), np.amax(openImg.array),
openImg.dpmm, openImg.dpi, openImg.cax.x, openImg.cax.y ) )
self.printMetaInfo( openImg.metadata )
if self.debug:
print("---------------------------")
print("CheckImage: %s, min: %1.3f, max: %1.3f, DPMM: %1.3f, DPI: %1.3f, CAX-x: %1.3f CAX-y:%1.3f"
% (testfilename, np.amin(checkImage.array), np.amax(checkImage.array),
checkImage.dpmm, checkImage.dpi, checkImage.cax.x, checkImage.cax.y ) )
self.printMetaInfo( checkImage.metadata )
"""
base = self.baseImage.array.copy()
check = self.image.array.copy()
if normalize == "diff":
            # add 0.000001 to both arrays and divide the closed field by the open one
self.image.array = (check + 0.000001) / (base + 0.000001)
elif normalize == "prozent":
self.image.array = ( (check + 0.000001) - (base + 0.000001) ) / (base + 0.000001)
def getMeanDose( self, field=None ):
"""Die mittlere Dosis eines Angegebenen Bereichs ermitteln
"""
if not field: # pragma: no cover
field = { "X1":-2, "X2": 2, "Y1": -2, "Y2":2 }
        # fetch the specified area to determine the dose there
roi = self.image.getRoi( field ).copy()
#print( roi.mean() )
return roi.mean()
#print( self.metadata )
| 31.460432 | 209 | 0.552938 |
fb2fabace401a8d0a972f811af8b0a86ed348c85
| 2,951 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
import frappe, re
from frappe import _
from frappe.utils import cstr
from erpnext.regional.india import states, state_numbers
from erpnext.controllers.taxes_and_totals import get_itemised_tax, get_itemised_taxable_amount
def validate_gstin_for_india(doc, method):
if not hasattr(doc, 'gstin'):
return
if doc.gstin:
doc.gstin = doc.gstin.upper()
if doc.gstin != "NA":
p = re.compile("[0-9]{2}[a-zA-Z]{5}[0-9]{4}[a-zA-Z]{1}[1-9A-Za-z]{1}[Z]{1}[0-9a-zA-Z]{1}")
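            # A structurally valid GSTIN, e.g. "27AAPFU0939F1ZV": two-digit
            # state code, five letters, four digits, one letter, one entity
            # code, the letter "Z", and a check character (the example value
            # is illustrative only).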
if not p.match(doc.gstin):
frappe.throw(_("Invalid GSTIN or Enter NA for Unregistered"))
if not doc.gst_state:
if doc.state in states:
doc.gst_state = doc.state
if doc.gst_state:
doc.gst_state_number = state_numbers[doc.gst_state]
if doc.gstin and doc.gstin != "NA" and doc.gst_state_number != doc.gstin[:2]:
frappe.throw(_("First 2 digits of GSTIN should match with State number {0}")
.format(doc.gst_state_number))
def get_itemised_tax_breakup_header(item_doctype, tax_accounts):
if frappe.get_meta(item_doctype).has_field('gst_hsn_code'):
return [_("HSN/SAC"), _("Taxable Amount")] + tax_accounts
else:
return [_("Item"), _("Taxable Amount")] + tax_accounts
def get_itemised_tax_breakup_data(doc):
itemised_tax = get_itemised_tax(doc.taxes)
itemised_taxable_amount = get_itemised_taxable_amount(doc.items)
if not frappe.get_meta(doc.doctype + " Item").has_field('gst_hsn_code'):
return itemised_tax, itemised_taxable_amount
item_hsn_map = frappe._dict()
for d in doc.items:
item_hsn_map.setdefault(d.item_code or d.item_name, d.get("gst_hsn_code"))
hsn_tax = {}
for item, taxes in itemised_tax.items():
hsn_code = item_hsn_map.get(item)
hsn_tax.setdefault(hsn_code, frappe._dict())
for tax_account, tax_detail in taxes.items():
hsn_tax[hsn_code].setdefault(tax_account, {"tax_rate": 0, "tax_amount": 0})
hsn_tax[hsn_code][tax_account]["tax_rate"] = tax_detail.get("tax_rate")
hsn_tax[hsn_code][tax_account]["tax_amount"] += tax_detail.get("tax_amount")
# set taxable amount
hsn_taxable_amount = frappe._dict()
for item, taxable_amount in itemised_taxable_amount.items():
hsn_code = item_hsn_map.get(item)
hsn_taxable_amount.setdefault(hsn_code, 0)
hsn_taxable_amount[hsn_code] += itemised_taxable_amount.get(item)
return hsn_tax, hsn_taxable_amount
def set_place_of_supply(doc, method):
if not frappe.get_meta('Address').has_field('gst_state'): return
if doc.doctype in ("Sales Invoice", "Delivery Note"):
address_name = doc.shipping_address_name or doc.customer_address
elif doc.doctype == "Purchase Invoice":
address_name = doc.shipping_address or doc.supplier_address
if address_name:
address = frappe.db.get_value("Address", address_name, ["gst_state", "gst_state_number"], as_dict=1)
doc.place_of_supply = cstr(address.gst_state_number) + "-" + cstr(address.gst_state)
# don't remove this function it is used in tests
def test_method():
'''test function'''
return 'overridden'
| 36.8875 | 102 | 0.74856 |
34ac94f8711db1745f63a3c064eaa86f3dde0de5
| 2,772 |
py
|
Python
|
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# Exercise 11 - Task 1
# Employee card file (Mitarbeiter-Kartei)
# Provided by M. Drews in the 2021/22 winter semester
# Functions
def trenner(anzahl_striche):
for i in range(anzahl_striche):
print("-", end="")
print()
def fehler():
print("\nFehler: Bitte geben Sie nur Zahlen an, die zur Auswahl stehen.")
def formular():
global vorname, nachname, geburtsort
vorname = input("> Vorname: ")
nachname = input("> Nachname: ")
geburtsort = input("> Geburtsort: ")
def suche():
global index
suche = input("Suchbegriff (Nachname eingeben): ")
index = next((i for i, item in enumerate(ma_kartei) if item["Nachname"] == suche), None)
def eintrag_neu():
print("\nBitte fügen Sie einen neuen Eintrag zur Mitarbeiter-Kartei hinzu: ")
formular()
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Speichern (2) Abbrechen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag = {"Vorname": vorname,"Nachname": nachname,"Geburtsort": geburtsort}
ma_kartei.append(eintrag)
print("Ihr Eintrag wurde gespeichert und der Kartei hinzugefügt.")
trenner(80)
elif auswahl == 2:
gueltige_eingabe = True
except:
fehler()
def eintrag_bearbeiten():
print("Welchen Eintrag möchten Sie bearbeiten?")
suche()
print("\nBitte überschreiben Sie den alten Eintrag:")
formular()
ma_kartei[index] = {"Vorname": vorname,"Nachname": nachname,"Geburtsort": geburtsort}
print("Ihr Eintrag wurde gespeichert und der Kartei hinzugefügt.")
trenner(80)
def eintrag_loeschen():
print("Welchen Eintrag möchten Sie löschen?")
suche()
print("\nFolgender Eintrag wurde gelöscht:")
print(ma_kartei[index])
ma_kartei.pop(index)
# Program flow
print("\n")
trenner(120)
print("Mitarbeiter-Kartei")
trenner(120)
trenner(120)
ma_kartei = []
programm = True
while programm:
print("Was möchten Sie tun?")
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Eintrag hinzufügen\n(2) Eintrag bearbeiten\n(3) Eintrag löschen\n(4) Kartei anzeigen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag_neu()
elif auswahl == 2:
gueltige_eingabe = True
eintrag_bearbeiten()
elif auswahl == 3:
gueltige_eingabe = True
eintrag_loeschen()
elif auswahl == 4:
gueltige_eingabe = True
print(ma_kartei)
trenner(80)
except:
fehler()
| 28.875 | 128 | 0.599206 |
1fd7ed8a83b56f175881d6f318fa389d67ee450a
| 732 |
py
|
Python
|
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
"""Berechnet die mündliche Note"""
import csv
with open('bewertung.csv', encoding='utf-8', mode='r') as bewertung:
TABELLE = []
DATA = csv.reader(bewertung, delimiter=',')
for row in DATA:
TABELLE.append([element.strip() for element in row])
OUTPUT = [TABELLE[0] + ["Note"]]
del TABELLE[0]
for row in TABELLE:
    if len(row) > 5:  # a full row carries all four component scores
note = 20*float(row[2]) + 20*float(row[3]) + 40*float(row[4]) + 20*float(row[5])
note = round(note/25, 0)/4
row = row + [note]
OUTPUT.append(row)
with open('note.csv', encoding='utf-8', mode='w') as safe:
WRITER = csv.writer(safe, delimiter=',')
for row in OUTPUT:
WRITER.writerow(row)
| 31.826087 | 92 | 0.562842 |
1f5755adc834fa964d8b57abac91fbc6499d9935
| 4,608 |
py
|
Python
|
menucard/migrations/0001_initial.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | 1 |
2021-01-23T21:42:10.000Z
|
2021-01-23T21:42:10.000Z
|
menucard/migrations/0001_initial.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
menucard/migrations/0001_initial.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-12-27 10:36
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
('crm', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vorspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Snacks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Nachspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Hauptspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Besucher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vorname', models.CharField(max_length=45)),
('nachname', models.CharField(max_length=45)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('telefon', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),
('strasse', models.CharField(max_length=45)),
('hausnummer', models.CharField(max_length=5)),
('plz', models.CharField(max_length=45)),
('stadt', models.CharField(max_length=45)),
('besucht_am', models.DateTimeField(auto_now_add=True, null=True)),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='AlkoholhaltigeDrinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('centiliter', models.FloatField()),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='AlkoholfreieDrinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('liter', models.FloatField()),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
]
| 48 | 128 | 0.567491 |
9b02d42862a5d0797afc71d43094512a70c96510
| 3,302 |
py
|
Python
|
Packs/dnstwist/Integrations/dnstwist/dnstwist.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/dnstwist/Integrations/dnstwist/dnstwist.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/dnstwist/Integrations/dnstwist/dnstwist.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import subprocess
from CommonServerPython import *
TWIST_EXE = '/dnstwist/dnstwist.py'
if demisto.command() == 'dnstwist-domain-variations':
KEYS_TO_MD = ["whois_updated", "whois_created", "dns_a", "dns_mx", "dns_ns"]
DOMAIN = demisto.args()['domain']
LIMIT = int(demisto.args()['limit'])
WHOIS = demisto.args().get('whois')
def get_dnstwist_result(domain, include_whois):
args = [TWIST_EXE, '-f', 'json']
if include_whois:
args.append('-w')
args.append(domain)
res = subprocess.check_output(args)
return json.loads(res)
def get_domain_to_info_map(dns_twist_result):
results = []
for x in dns_twist_result:
temp = {} # type: dict
for k, v in x.items():
if k in KEYS_TO_MD:
if x["domain"] not in temp:
temp["domain-name"] = x["domain"]
if k == "dns_a":
temp["IP Address"] = v
else:
temp[k] = v
if temp:
results.append(temp)
return results
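    # e.g. a dnstwist entry {"domain": "examp1e.com", "dns_a": "1.2.3.4"}
    # would be mapped to {"domain-name": "examp1e.com", "IP Address": "1.2.3.4"}
    # (hypothetical values for illustration)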
dnstwist_result = get_dnstwist_result(DOMAIN, WHOIS == 'yes')
new_result = get_domain_to_info_map(dnstwist_result)
md = tableToMarkdown('dnstwist for domain - ' + DOMAIN, new_result,
headers=["domain-name", "IP Address", "dns_mx", "dns_ns", "whois_updated", "whois_created"])
domain_context = new_result[0] # The requested domain for variations
domains_context_list = new_result[1:LIMIT + 1] # The variations domains
domains = []
for item in domains_context_list:
temp = {"Name": item["domain-name"]}
if "IP Address" in item:
temp["IP"] = item["IP Address"]
if "dns_mx" in item:
temp["DNS-MX"] = item["dns_mx"]
if "dns_ns" in item:
temp["DNS-NS"] = item["dns_ns"]
if "whois_updated" in item:
temp["WhoisUpdated"] = item["whois_updated"]
if "whois_created" in item:
temp["WhoisCreated"] = item["whois_created"]
domains.append(temp)
ec = {"Domains": domains}
if "domain-name" in domain_context:
ec["Name"] = domain_context["domain-name"]
if "IP Address" in domain_context:
ec["IP"] = domain_context["IP Address"]
if "dns_mx" in domain_context:
ec["DNS-MX"] = domain_context["dns_mx"]
if "dns_ns" in domain_context:
ec["DNS-NS"] = domain_context["dns_ns"]
if "whois_updated" in domain_context:
ec["WhoisUpdated"] = domain_context["whois_updated"]
if "whois_created" in domain_context:
ec["WhoisCreated"] = domain_context["whois_created"]
entry_result = {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': dnstwist_result,
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'dnstwist.Domain(val.Name == obj.Name)': ec}
}
demisto.results(entry_result)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
subprocess.check_output([TWIST_EXE, '-h'], stderr=subprocess.STDOUT)
demisto.results('ok')
sys.exit(0)
| 35.891304 | 117 | 0.58934 |
7b3a56677628b2dcca3ff0494700cfb7a0aa4b48
| 2,173 |
py
|
Python
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import pytest
from GoogleCloudFunctions import resolve_default_project_id, functions_list_command
@pytest.mark.parametrize('project, credentials_json, expected_output,expected_exception', [
("some-project-id", {"credentials_json": {"type": "service_account", "project_id": "some-project-id"}},
"some-project-id", None),
(None, {"credentials_json": {"type": "service_account", "project_id": "some-project-id"}}, "some-project-id", None),
("some-project-id", {"credentials_json": {"type": "service_account"}}, "some-project-id", None),
(None, {"credentials_json": {"type": "service_account"}}, None, SystemExit)
])
def test_resolve_default_project_id(project, credentials_json, expected_output, expected_exception):
credentials_json = credentials_json.get('credentials_json')
if expected_exception is None:
assert resolve_default_project_id(project, credentials_json) == expected_output
else:
with pytest.raises(SystemExit):
assert resolve_default_project_id(project, credentials_json) == expected_output
def test_format_parameters():
from GoogleCloudFunctions import format_parameters
parameters_to_check = "key:value , name: lastname, onemorekey : to test "
result = format_parameters(parameters_to_check)
assert result == '{"key": "value", "name": "lastname", "onemorekey": "to test"}'
bad_parameters = "oh:no,bad"
with pytest.raises(ValueError):
format_parameters(bad_parameters)
class GoogleClientMock:
def __init__(self, region='region', project='project', functions=None):
if functions is None:
functions = []
self.region = region
self.project = project
self.functions = functions
def functions_list(self, region, project_id):
return {'functions': self.functions}
def test_no_functions():
"""
Given:
- Google client without functions
When:
- Running functions-list command
Then:
- Ensure expected human readable response is returned
"""
client = GoogleClientMock()
hr, _, _ = functions_list_command(client, {})
assert hr == 'No functions found.'
| 36.830508 | 120 | 0.698113 |
7b882a00a99da3e2e17e41e9f577ca3003e8abd3
| 2,561 |
py
|
Python
|
app/core/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
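    # Minimal usage sketch (hypothetical credentials), once this manager is
    # attached to the User model below:
    #   user = User.objects.create_user('jane@example.com', 'secret')
    #   admin = User.objects.create_superuser('root@example.com', 'secret')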
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that suppors using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Provincia(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Distrito(models.Model):
"""Model definition for District."""
# TODO: Define fields here
name = models.CharField(max_length=100)
provincia = models.ForeignKey('Provincia', on_delete=models.CASCADE)
def __str__(self):
"""Unicode representation of District."""
return self.name
class UnidadeSanitaria(models.Model):
"""Model definition for HealthFacility."""
id = models.CharField(max_length=255, primary_key=True)
name = models.CharField(max_length=255)
# openmrs_name = models.CharField(max_length=255, null=True, blank=True)
distrito = models.ForeignKey('Distrito', on_delete=models.CASCADE)
class Meta:
"""Meta definition for HealthFacility."""
verbose_name = 'Unidade Sanitaria'
verbose_name_plural = 'Unidades Sanitarias'
def __str__(self):
"""Unicode representation of HealthFacility."""
return self.name
class Livro(models.Model):
tipo = models.CharField(max_length=100)
numero = models.IntegerField()
pagina = models.IntegerField()
linha = models.IntegerField()
def __str__(self):
return f'{self.tipo} {self.numero}'
| 30.488095 | 76 | 0.673565 |
b540b40d9aaf331bef2f785083b2bbd7ed30bfe6
| 619 |
py
|
Python
|
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
"""
Fibonacci sequence using python
generators
Written by: Ian Doarn
"""
def fib():
# Generator that yields fibonacci numbers
a, b = 0, 1
while True: # First iteration:
yield a # yield 0 to start with and then
a, b = b, a + b # a will now be 1, and b will also be 1, (0 + 1)
if __name__ == '__main__':
# Maximum fib numbers to print
max_i = 20
for i, fib_n in enumerate(fib()):
#Print each yielded fib number
print('{i:3}: {f:3}'.format(i=i, f=fib_n))
# Break when we hit max_i value
if i == max_i:
break
| 23.807692 | 75 | 0.55412 |
8d5bd4af92a66ece14d4931534ffa3416cb4b661
| 3,919 |
py
|
Python
|
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178 |
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2 |
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import time
from google.appengine.api import users
from google.appengine.ext import ndb
from framework.utils import now
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.exceptions import BusinessException
from plugins.tff_backend.models.payment import ThreeFoldTransaction, ThreeFoldPendingTransaction
from plugins.tff_backend.to.payment import WalletBalanceTO
def _get_balance_from_transactions(transactions, token):
# type: (list[ThreeFoldTransaction], unicode) -> WalletBalanceTO
available_balance = 0
total_balance = 0
total_description_details = []
# TODO set to minimum precision of all transactions when transactions have the 'precision' property
# (and multiply available / total amount depending on precision)
precision = 2
# for transaction in transactions:
# precision = max(transaction.precision, precision)
for transaction in transactions:
if transaction.token != token:
            raise BusinessException('Invalid transaction supplied to _get_balance_from_transactions. '
                                    'All transactions must have %s as token' % token)
amount_spent = transaction.amount - transaction.amount_left
unlocked_amount = 0
now_ = now()
for unlock_timestamp, unlock_amount in zip(transaction.unlock_timestamps, transaction.unlock_amounts):
if unlock_timestamp <= now_:
unlocked_amount += unlock_amount
else:
total_description_details.append((unlock_timestamp, unlock_amount))
spendable_amount = unlocked_amount - amount_spent
available_balance += spendable_amount
total_balance += transaction.amount_left
if total_description_details:
total_description = u"""## %(token)s Unlock times'
|Date|#%(token)s|
|---|---:|
""" % {'token': token}
for unlock_timestamp, unlock_amount in sorted(total_description_details, key=lambda tup: tup[0]):
date = time.strftime('%a %d %b %Y %H:%M:%S GMT', time.localtime(unlock_timestamp))
amount = u'{:0,.2f}'.format(unlock_amount / 100.0)
total_description += u'\n|%s|%s|' % (date, amount)
else:
total_description = None
return WalletBalanceTO(available=available_balance, total=total_balance, description=total_description, token=token,
precision=precision)
@returns([WalletBalanceTO])
@arguments(username=unicode)
def get_all_balances(username):
transactions = ThreeFoldTransaction.list_with_amount_left(username)
token_types = set(map(lambda transaction: transaction.token, transactions))
results = []
for token in token_types:
transactions_per_token = [trans for trans in transactions if trans.token == token]
results.append(_get_balance_from_transactions(transactions_per_token, token))
return results
@returns(tuple)
@arguments(username=unicode, page_size=(int, long), cursor=unicode)
def get_pending_transactions(username, page_size, cursor):
# type: (users.User, long, unicode) -> tuple[list[ThreeFoldPendingTransaction], ndb.Cursor, bool]
return ThreeFoldPendingTransaction.list_by_user(username) \
.fetch_page(page_size, start_cursor=ndb.Cursor(urlsafe=cursor))
| 42.597826 | 120 | 0.720082 |
274a678ce7ef66ccf7cfb21453ee41a8617d1632
| 4,173 |
py
|
Python
|
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4 |
2021-03-25T13:15:38.000Z
|
2021-11-10T12:29:19.000Z
|
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | null | null | null |
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4 |
2021-03-25T13:18:10.000Z
|
2021-04-08T13:44:48.000Z
|
from posix import listdir
import requests
from bs4 import BeautifulSoup as bs
import math
import sys, getopt
import re
import os
def re_cleaner(target: str, rep: str) -> str:
return re.sub("[^0-9a-zA-Z]+", rep, target)
# For Oxford ==============================================================================
# base_url = "https://www.ox.ac.uk/"
# base_dir = "pages/oxford"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.ox.ac.uk/admissions/graduate/courses/courses-a-z-listing')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# # find by class attr
# course_divs = root_soup.find_all(attrs={"class": "course-title"})
# for div in course_divs:
# # 从div中取出a然后解析url
# # 用re直接find_all 符合 ** graduate/courses/ ** 的url更好解释
# link, degree = div.children
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + link.get('href'))
# course_name = link.text
# with open(os.path.join(base_dir, re_cleaner(course_name+' '+degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
#UIUC ==============================================================================
# base_url = "http://catalog.illinois.edu/"
# base_dir = "pages/uiuc"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('http://catalog.illinois.edu/graduate/')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# course_heads = root_soup.find_all("h4")
# for h in course_heads:
# # 从head中取出a然后解析url, 若有margin left, 则不考虑
# if 'style' not in h.attrs:
# # 最多分成两端,此处degree会有冗余, 但生成文件时正确的degree会在最后一个破折号处,优雅
# major, degree = h.text.split(',' ,1)
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + h.a['href'])
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, ' ')+'.html'), mode='wb') as f:
# f.write(r.content)
# IC ==============================================================================
#
# base_url = "https://www.imperial.ac.uk/"
# base_dir = "pages/ic"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.imperial.ac.uk/study/pg/courses/')
# root_soup = bs(url_pages.text, 'html.parser')
# # find by class attr
# course_lis = root_soup.find_all(attrs={"class": "course"})
# for li in course_lis:
# degree = li.a.contents[5].contents[1].strip()
# if re.match("D", degree) is None and re.match("PG", degree) is None:
# url = base_url + li.a['href']
# major = li.a['title']
# r = requests.get(url)
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
# Make Index ==============================================================================
import json
import pickle
def clean_html(soup: bs):
ss = soup.find_all('script')
for s in ss:
s.decompose()
return re_cleaner(soup.get_text(), ' ')
data = {}
pages_path = os.path.join(os.getcwd(), 'pages')
idx = 1
for school in os.listdir(pages_path):
school_path = os.path.join(pages_path, school)
for filename in os.listdir(school_path):
filepath = os.path.join(school_path, filename)
program, degree_html = filename.rsplit('-', 1)
degree,_ = degree_html.split('.', 1)
print(filename)
with open(filepath) as f:
soup = bs(f, 'html.parser')
desc = clean_html(soup)
jsobj = json.dumps({"document_id": idx, "school_name": school, "program_name": program, "degree": degree, "file_path": filepath, "program_desc": desc})
data[idx] = jsobj
idx += 1
pkfile = 'programs.pkl'
with open(pkfile, 'wb') as f:
pickle.dump(data, f)
| 32.601563 | 163 | 0.578481 |
275bf3b0ea75846995ad189f786825044efb445e
| 4,940 |
py
|
Python
|
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
'''
harmalysis - a language for harmonic analysis and roman numerals
Copyright (C) 2020 Nestor Napoles Lopez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import harmalysis.common
from harmalysis.classes import interval
class MajorScale(object):
def __init__(self):
self._qualities = [
# Starting from I
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
# Starting from II
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
# Starting from III
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
# Starting from IV
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
# Starting from V
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
# Starting from VI
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
# Starting from VII
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
]
self._semitones = [
# Starting from I
[0, 2, 4, 5, 7, 9, 11],
# Starting from II
[0, 2, 3, 5, 7, 9, 10],
# Starting from III
[0, 1, 3, 5, 7, 8, 10],
# Starting from IV
[0, 2, 4, 6, 7, 9, 11],
# Starting from V
[0, 2, 4, 5, 7, 9, 10],
# Starting from VI
[0, 2, 3, 5, 7, 8, 10],
# Starting from VII
[0, 1, 3, 5, 6, 8, 10],
]
def step_to_interval_spelling(self, step, mode=1):
qualities = self._qualities[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
quality = qualities[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
return interval.IntervalSpelling(quality, step)
def step_to_semitones(self, step, mode=1):
semitones = self._semitones[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
step_semitones = semitones[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
octaves = (step - 1) // harmalysis.common.DIATONIC_CLASSES
distance = (12 * octaves) + step_semitones
return distance
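    # Quick sanity check against the tables above (mode 1, step 5):
    #   MajorScale().step_to_interval_spelling(5) -> IntervalSpelling('P', 5)
    #   MajorScale().step_to_semitones(5)         -> 7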
class NaturalMinorScale(MajorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 10],
[0, 1, 3, 5, 6, 8, 10],
[0, 2, 4, 5, 7, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[0, 1, 3, 5, 7, 8, 10],
[0, 2, 4, 6, 7, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
]
class HarmonicMinorScale(NaturalMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'M'],
['P', 'm', 'm', 'P', 'D', 'M', 'm'],
['P', 'M', 'M', 'P', 'A', 'M', 'M'],
['P', 'M', 'm', 'A', 'P', 'M', 'm'],
['P', 'm', 'M', 'P', 'P', 'm', 'm'],
['P', 'A', 'M', 'A', 'P', 'M', 'M'],
['P', 'm', 'm', 'D', 'D', 'm', 'D'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 11],
[0, 1, 3, 5, 6, 9, 10],
[0, 2, 4, 5, 6, 9, 11],
[0, 2, 3, 6, 7, 9, 10],
[0, 1, 4, 5, 7, 8, 10],
[0, 3, 4, 6, 7, 9, 11],
[0, 1, 3, 4, 6, 8, 9],
]
class AscendingMelodicMinorScale(HarmonicMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'M', 'M'],
['P', 'm', 'm', 'P', 'P', 'M', 'm'],
['P', 'M', 'M', 'A', 'A', 'M', 'M'],
['P', 'M', 'M', 'A', 'P', 'M', 'm'],
['P', 'M', 'M', 'P', 'P', 'm', 'm'],
['P', 'M', 'm', 'P', 'D', 'm', 'm'],
['P', 'm', 'm', 'D', 'D', 'm', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 9, 11],
[0, 1, 3, 5, 7, 9, 10],
[0, 2, 4, 6, 8, 9, 11],
[0, 2, 4, 6, 7, 9, 10],
[0, 2, 4, 5, 7, 8, 10],
[0, 2, 3, 5, 6, 8, 10],
[0, 1, 3, 4, 6, 8, 10]
]
| 35.285714 | 84 | 0.411943 |
2775121ab7502b6919cf78437931035cd8b7a2d9
| 158 |
py
|
Python
|
src/onegov/user/auth/clients/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/auth/clients/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/auth/clients/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.user.auth.clients.kerberos import KerberosClient
from onegov.user.auth.clients.ldap import LDAPClient
__all__ = ('KerberosClient', 'LDAPClient')
| 31.6 | 60 | 0.816456 |
27811ac83801b7707ced28bf3be304104b0b4fe0
| 212 |
py
|
Python
|
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from calls.onsets.models import Recording
class RecordingAdmin(admin.ModelAdmin):
list_display = ('audio', 'image', 'length')
admin.site.register(Recording, RecordingAdmin)
| 26.5 | 47 | 0.783019 |
8be7873229c136c3351120aeb123d5e799820294
| 710 |
py
|
Python
|
utils.py
|
florenthemmi/ips-by-country
|
2f63ec2108ceaae97221de52654753c545733d84
|
[
"MIT"
] | 1 |
2021-05-24T06:16:49.000Z
|
2021-05-24T06:16:49.000Z
|
utils.py
|
florenthemmi/ips-by-country
|
2f63ec2108ceaae97221de52654753c545733d84
|
[
"MIT"
] | null | null | null |
utils.py
|
florenthemmi/ips-by-country
|
2f63ec2108ceaae97221de52654753c545733d84
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from config import CIDR_MAX_SUBNETS
class IPRange(object):
def __init__(self, data):
self.range_start = data[0]
self.range_end = data[1]
self.total_ips = int(data[2])
self.assign_date = datetime.strptime(data[3], '%d/%m/%y')
self.owner = data[4]
self.cidr = IPRange.get_cidr(self.range_start, self.total_ips)
@staticmethod
def get_cidr(range_start, total_ips):
mask = CIDR_MAX_SUBNETS.get(total_ips, None)
if not mask:
return None
        return '{}/{}'.format(range_start, mask)
def __str__(self):
return '{}'.format(self.cidr or self.range_start)
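# Minimal usage sketch (hypothetical register row; assumes CIDR_MAX_SUBNETS
# maps 256 addresses to a /24 mask):
#   ip_range = IPRange(['1.0.0.0', '1.0.0.255', '256', '01/01/21', 'APNIC'])
#   print(ip_range)  # -> 1.0.0.0/24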
| 26.296296 | 71 | 0.640845 |
4bfe5926292aa222488a49dbf22dd03f8782815e
| 1,405 |
py
|
Python
|
exercises/pt/test_01_11.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/test_01_11.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/test_01_11.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
import spacy.matcher
assert isinstance(
matcher, spacy.matcher.Matcher
), "Você está inicializando o Comparador corretamente?"
assert (
"Matcher(nlp.vocab)" in __solution__
), "Você está inicializando o Comparador corretamente com o vocabulário compartilhado?"
assert (
len(pattern) == 2
), "A expressão deve descrever dois tokens (dois dicionários)."
assert isinstance(pattern[0], dict) and isinstance(
pattern[1], dict
), "Cada item da expressão deve conter um dicionário."
assert (
len(pattern[0]) == 1 and len(pattern[1]) == 1
), "Cada item na expressão deve conter apenas uma chave."
assert any(
pattern[0].get(key) == "iPhone" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert any(
pattern[1].get(key) == "X" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert (
'matcher.add("IPHONE_X_PATTERN"' in __solution__
), "Você está adicionando a expressão corretamente?"
assert (
"matches = matcher(doc)" in __solution__
), "Você está chamando o Comparador passando o doc como parâmetro?"
__msg__.good(
"Parabéns! Você identificou uma correspondência com sucesso: dois tokens "
"em doc[1:3] que correspondem a partição 'iPhone X'. "
)
| 39.027778 | 91 | 0.646263 |
ef246213ff135ecbc464dc2dd429de5edde34475
| 720 |
py
|
Python
|
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | 2 |
2018-02-26T10:00:29.000Z
|
2018-03-16T11:39:34.000Z
|
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | null | null | null |
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | null | null | null |
import json
import requests
def upload_file(upload_url, file_path):
files = {'file': open(file_path, 'rb')}
response = requests.post(upload_url, files=files)
ret = response.content.decode('utf-8')
ret_json = json.loads(ret)
    print(ret_json)
return ret_json['data']
def post_json(post_url, post_data):
headers = {'content-type': 'application/json'}
response = requests.post(post_url, data=json.dumps(post_data), headers=headers)
return response.content.decode('utf-8')
def post_form(post_url, post_data):
headers = {'content-type': 'x-www-form-urlencoded'}
response = requests.post(post_url, params=post_data, headers=headers)
return response.content.decode('utf-8')
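# Minimal usage sketch (hypothetical endpoint and file path):
#   data = post_json('http://example.com/api/report', {'device': 'pi-01'})
#   upload_file('http://example.com/api/upload', '/tmp/capture.jpg')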
| 27.692308 | 83 | 0.708333 |
ef5a62962aed890737736832f581c39140877b07
| 2,130 |
py
|
Python
|
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | 5 |
2017-08-03T06:33:49.000Z
|
2021-08-06T13:20:57.000Z
|
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | null | null | null |
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | 6 |
2017-04-27T13:30:49.000Z
|
2020-11-01T20:28:55.000Z
|
#!/usr/bin/env python
__author__ = "bt3"
import random
''' The simplest way...'''
def quickSelect(seq, k):
# this part is the same as quick sort
len_seq = len(seq)
    # base case: a single remaining element is the answer for any valid k
    if len_seq < 2: return seq[0]
# we could use a random choice here doing
#pivot = random.choice(seq)
ipivot = len_seq // 2
pivot = seq[ipivot]
# O(n)
smallerList = [x for i,x in enumerate(seq) if x <= pivot and i != ipivot]
largerList = [x for i,x in enumerate(seq) if x > pivot and i != ipivot]
# here starts the different part
m = len(smallerList)
if k == m:
return pivot
elif k < m:
return quickSelect(smallerList, k)
else:
return quickSelect(largerList, k-m-1)
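# Note: quickSelect(seq, k) returns the k-th smallest element, counting from
# zero, e.g. quickSelect([3, 1, 2], 1) == 2.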
''' If you don't want to use Python's features at all and
    also want to select the pivot randomly '''
def swap(seq, x, y):
tmp = seq[x]
seq[x] = seq[y]
seq[y] = tmp
def quickSelectHard(seq, k, left=None, right=None):
left = left or 0
right = right or len(seq) - 1
    # the pivot index must lie inside the current [left, right] window
    # ipivot = random.randint(left, right)
    ipivot = (left + right) // 2
pivot = seq[ipivot]
# Move pivot out of the sorting range
swap(seq, ipivot, right)
swapIndex, i = left, left
while i < right:
if seq[i] < pivot:
swap(seq, i, swapIndex)
swapIndex += 1
i += 1
# Move pivot to final position
swap(seq, right, swapIndex)
# Check if pivot matches, else recurse on the correct half
rank = len(seq) - swapIndex
if k == rank:
return seq[swapIndex]
elif k < rank:
return quickSelectHard(seq, k, swapIndex+1, right)
else:
return quickSelectHard(seq, k, left, swapIndex-1)
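# Note: quickSelectHard counts ranks from the largest element instead,
# e.g. quickSelectHard([3, 1, 2], 1) == 3.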
if __name__ == '__main__':
# Checking the Answer
seq = [10, 60, 100, 50, 60, 75, 31, 50, 30, 20, 120, 170, 200]
#seq = [3, 7, 2, 1, 4, 6, 5, 10, 9, 11]
# we want the middle element
k = len(seq) // 2
    # Note that this only works for odd arrays, since the median of
    # an even array is the mean of the two middle elements
print(quickSelect(seq, k))
print(quickSelectHard(seq, k))
import numpy
    print(numpy.median(seq))
| 23.932584 | 78 | 0.597653 |
dee00922a67f6dff4732cf526028648896d0fc92
| 2,290 |
py
|
Python
|
Phototweet.py
|
sbamueller/RasperryPi_BildFeinstaub
|
3666db384ead64893b3c548065aa31cef6c126af
|
[
"Apache-2.0"
] | null | null | null |
Phototweet.py
|
sbamueller/RasperryPi_BildFeinstaub
|
3666db384ead64893b3c548065aa31cef6c126af
|
[
"Apache-2.0"
] | null | null | null |
Phototweet.py
|
sbamueller/RasperryPi_BildFeinstaub
|
3666db384ead64893b3c548065aa31cef6c126af
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# tweetpic.py take a photo with the Pi camera and tweet it
# by Alex Eames http://raspi.tv/?p=5918
import tweepy
from subprocess import call
from datetime import datetime
import requests
import json
i = datetime.now()  # take time and date for the filename
now = i.strftime('%Y%m%d-%H%M%S')
photo_name = now + '.jpg'
# note the trailing slash: without it the photo lands next to /home/pi/Pictures
cmd = 'raspistill -t 500 -w 1024 -h 768 -o /home/pi/Pictures/' + photo_name
call([cmd], shell=True)  # shoot the photo
def pick_values(sensor):
    # Query the SDS011 and DHT11 sensor data via the luftdaten.info API
    # Peter Furle @Alpensichtung Hotzenwald 04 2017
    r = requests.get(sensor)
    json_string = r.text
    parsed_json = json.loads(json_string)
    # pretty print to understand what is going on here:
    # print(json.dumps(parsed_json, sort_keys=True, indent=4, separators=(',', ': ')))
    l = len(parsed_json) - 1
    a = len(parsed_json[l]['sensordatavalues'])
    if a == 1:
        result = parsed_json[l]['sensordatavalues'][0]['value_type'] + ": " + parsed_json[l]['sensordatavalues'][0]['value']
    if a == 2:
        result = parsed_json[l]['sensordatavalues'][0]['value_type'] + ": " + parsed_json[l]['sensordatavalues'][0]['value']
        result = result + " " + parsed_json[l]['sensordatavalues'][1]['value_type'] + ": " + parsed_json[l]['sensordatavalues'][1]['value']
    return result
# Freiburg sensors run by sbamueller
url = 'http://api.luftdaten.info/static/v1/sensor/534/'
tweet = pick_values(url)
url = 'http://api.luftdaten.info/static/v1/sensor/533/'
tweet = tweet + " " + pick_values(url)
# Shorten the labels so the text fits into a 140-character tweet
tweet = tweet.replace('temperature: ', '| Temp C:')
tweet = tweet.replace('P1:', '| PM10:')
tweet = tweet.replace('P2:', 'PM2.5:')
# print(tweet)
# Consumer keys and access tokens, used for OAuth
CONSUMER_KEY = 'ihrKey'
CONSUMER_SECRET = 'ihrKey'
ACCESS_KEY = 'ihrKey'
ACCESS_SECRET = 'ihrKey'
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(CONSUMER_KEY , CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY , ACCESS_SECRET)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
# Send the tweet with photo
photo_path = '/home/pi/Pictures/' + photo_name
status = 'Blick auf Freiburg mit Feinstaubwerten, Temp & Luftfeuchte ' + now
status = status + tweet
api.update_with_media(photo_path, status=status)
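# Note: api.update_with_media() is deprecated in newer tweepy releases.
# A rough equivalent under current tweepy (untested sketch, not part of
# the original script):
# media = api.media_upload(photo_path)
# api.update_status(status=status, media_ids=[media.media_id])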
| 31.369863 | 80 | 0.691266 |
cd63c34fbdfbd183f707a4b54997655b51643809
| 3,417 |
py
|
Python
|
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath import redirect
from onegov.core.security import Private
from onegov.gazette import _
from onegov.gazette import GazetteApp
from onegov.gazette.forms import EmptyForm
from onegov.gazette.layout import Layout
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
from onegov.user.forms import UserGroupForm
@GazetteApp.html(
model=UserGroupCollection,
template='groups.pt',
permission=Private
)
def view_groups(self, request):
""" View all the user groups.
    This view is only visible to an admin.
"""
layout = Layout(self, request)
return {
'layout': layout,
'groups': self.query().all(),
'title': _('Groups'),
'new_group': request.link(self, name='new-group')
}
@GazetteApp.form(
model=UserGroupCollection,
name='new-group',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def create_group(self, request, form):
""" Create a new user group.
    This view is only visible to an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
self.add(name=form.name.data)
request.message(_("Group added."), 'success')
return redirect(layout.manage_groups_link)
return {
'layout': layout,
'form': form,
'title': _("New Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='edit',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def edit_group(self, request, form):
""" Edit a user group.
    This view is only visible to an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_("Group modified."), 'success')
return redirect(layout.manage_groups_link)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Edit Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='delete',
template='form.pt',
permission=Private,
form=EmptyForm
)
def delete_group(self, request, form):
""" Delete a user group.
    This view is only visible to an admin.
"""
layout = Layout(self, request)
if self.official_notices:
request.message(
_("There are official notices linked to this group!"),
'warning'
)
if self.users.count():
request.message(
_('Only groups without users may be deleted.'),
'alert'
)
return {
'layout': layout,
'title': self.name,
'subtitle': _("Delete Group"),
'show_form': False
}
if form.submitted(request):
UserGroupCollection(request.session).delete(self)
request.message(_("Group deleted."), 'success')
return redirect(layout.manage_groups_link)
return {
'message': _(
'Do you really want to delete "${item}"?',
mapping={'item': self.name}
),
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Delete Group"),
'button_text': _("Delete Group"),
'button_class': 'alert',
'cancel': layout.manage_groups_link
}
| 23.244898 | 66 | 0.605502 |
f89c748dd51197d30a5af7af230eb9f70959fb01
| 894 |
py
|
Python
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import gast as ast
from beniget import Ancestors, DefUseChains as DUC, UseDefChains
from beniget.beniget import Def
__all__ = ["Ancestors", "DefUseChains", "UseDefChains"]
class DefUseChains(DUC):
def visit_List(self, node):
if isinstance(node.ctx, ast.Load):
dnode = self.chains.setdefault(node, Def(node))
for elt in node.elts:
if isinstance(elt, CommentLine):
continue
self.visit(elt).add_user(dnode)
return dnode
        # unfortunately, destructured nodes are marked as Load;
        # only the parent List/Tuple is marked as Store
elif isinstance(node.ctx, ast.Store):
return self.visit_Destructured(node)
visit_Tuple = visit_List
# this import has to be after the definition of DefUseChains
from transonic.analyses.extast import CommentLine # noqa: E402
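# A minimal usage sketch (assuming a plain gast tree without CommentLine
# nodes; illustrative only):
#   tree = ast.parse("x = [1, 2]\ny = x")
#   chains = DefUseChains()
#   chains.visit(tree)
#   # the def-use chains for the list and for 'x' are now in chains.chains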
| 29.8 | 64 | 0.659955 |
3e78c123f36641a6b522ac2d459248b01e28de60
| 1,204 |
py
|
Python
|
hello/hello_pil.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
hello/hello_pil.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
hello/hello_pil.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
im = Image.open('F:/workspace/python/data/backpink.jpg')
im2 = im.filter(ImageFilter.BLUR)
im2.save('F:/workspace/python/data/backpink_blur.png', 'png')
im2.save('F:/workspace/python/data/backpink_blur.jpg', 'jpeg')
# Random uppercase letter:
def random_char():
return chr(random.randint(65, 90))
# Random color 1 (background pixels):
def random_color():
return random.randint(64, 255), random.randint(64, 255), random.randint(64, 255)
# Random color 2 (text):
def random_color2():
return random.randint(32, 127), random.randint(32, 127), random.randint(32, 127)
# 240 x 60:
width = 60 * 4
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
# Create a Font object:
font = ImageFont.truetype('C:/Windows/Fonts/Arial.ttf', 36)
# Create a Draw object:
draw = ImageDraw.Draw(image)
# Fill every pixel with a random color:
for x in range(width):
for y in range(height):
draw.point((x, y), fill=random_color())
# Draw four random letters:
for t in range(4):
    draw.text((60 * t + 10, 10), random_char(), font=font, fill=random_color2())
# Apply blur:
image = image.filter(ImageFilter.BLUR)
image.save('code.jpg', 'jpeg')
print((image.format, image.size, image.mode))
# image.show()
| 24.571429 | 84 | 0.680233 |
39cd57d3e96930bf2512f61084f0ec5dbd909936
| 2,129 |
py
|
Python
|
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
django_project/apps/qfauth/forms.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from django import forms
from apps.forms import FormMixin
from django.core import validators
from .models import User
from django.core.cache import cache
class LoginForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11,min_length=11)
password = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
remember = forms.IntegerField(required=False)
class RegisterForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11, min_length=11,validators=[validators.RegexValidator(r'1[3-9]\d{9}',message="请输入正确的手机号")])
username = forms.CharField(max_length=30)
password1 = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
password2 = forms.CharField(max_length=30,min_length=6,error_messages={"max_length":"密码最多不能超过30个字符","min_length":"密码最少不能少于6个字符"})
img_captcha = forms.CharField(max_length=4,min_length=4)
sms_captcha = forms.CharField(max_length=4,min_length=4)
    def clean(self):
        cleaned_data = super(RegisterForm, self).clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError('两次密码输入不一致')
        # validate the image captcha
        img_captcha = cleaned_data.get('img_captcha')  # entered by the user
        cache_img_captcha = cache.get(img_captcha.lower())  # cached value
        # print(cache_img_captcha)  # debug leftover
        if not cache_img_captcha or img_captcha.lower() != cache_img_captcha.lower():
            raise forms.ValidationError('图形验证码输入错误')
        # validate the SMS captcha
        telephone = cleaned_data.get('telephone')
        sms_captcha = cleaned_data.get('sms_captcha')  # entered by the user
        cache_sms_captcha = cache.get(telephone)  # cached value
        if not cache_sms_captcha or sms_captcha.lower() != cache_sms_captcha.lower():
            raise forms.ValidationError('短信验证码输入错误')
        exists = User.objects.filter(telephone=telephone).exists()
        if exists:
            # the original was missing 'raise', so this check had no effect
            raise forms.ValidationError('该手机号已经被注册')
        return cleaned_data
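# A minimal validation sketch (field values below are hypothetical, and the
# captcha checks would need matching cache entries; illustrative only):
#   form = LoginForm(data={'telephone': '13800000000', 'password': 'secret1'})
#   if form.is_valid():
#       ...  # authenticate the user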
| 43.44898 | 136 | 0.716768 |
f24b88cb32a898b91b261cd705b2ad3fcd5d1287
| 2,950 |
py
|
Python
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 2,816 |
2018-06-26T18:52:52.000Z
|
2021-04-06T10:39:15.000Z
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310 |
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
extension/visualizer/generate_visualizer_header.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270 |
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
# this script generates visualizer header
import os
visualizer_dir = 'extension/visualizer'
visualizer_css = os.path.join(visualizer_dir, 'visualizer.css')
visualizer_d3 = os.path.join(visualizer_dir, 'd3.js')
visualizer_script = os.path.join(visualizer_dir, 'script.js')
visualizer_header = os.path.join(visualizer_dir, 'include', 'visualizer_constants.hpp')
def open_utf8(fpath, flags):
import sys
if sys.version_info[0] < 3:
return open(fpath, flags)
else:
return open(fpath, flags, encoding="utf8")
def get_byte_array(fpath, add_null_terminator=True):
with open(fpath, 'rb') as f:
text = bytearray(f.read())
result_text = ""
first = True
for byte in text:
if first:
result_text += str(byte)
else:
result_text += ", " + str(byte)
first = False
if add_null_terminator:
result_text += ", 0"
return result_text
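# For example, a file containing the two bytes 'ab' yields "97, 98, 0" --
# a C-style initializer list with the null terminator appended.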
def write_file(fname, varname):
result = "const uint8_t %s[] = {" % (varname,) + get_byte_array(fname) + "};\n"
return result
def create_visualizer_header():
result = """/* THIS FILE WAS AUTOMATICALLY GENERATED BY generate_visualizer_header.py */
/*
Copyright 2010-2020 Mike Bostock
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the author nor the names of contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
"""
result += write_file(visualizer_css, "css")
result += write_file(visualizer_d3, "d3")
result += write_file(visualizer_script, "script")
with open_utf8(visualizer_header, 'w+') as f:
f.write(result)
create_visualizer_header()
| 36.419753 | 92 | 0.737627 |
f284677f3d515ed6519b9b9782d95ab9e355ded5
| 4,052 |
py
|
Python
|
Controller/control/WorkerControl.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | null | null | null |
Controller/control/WorkerControl.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | 7 |
2020-07-19T03:29:21.000Z
|
2022-03-02T06:46:12.000Z
|
Controller/control/WorkerControl.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | null | null | null |
import json
import queue
from control.WorkerQueue import WorkerQueue as WQ
from data.StorageIO import StorageIO
'''
The WorkerControl coordinates workers and assigns jobs.
Workers register themselves at startup. The controller queues workers as well as jobs in two separate queues.
As soon as a worker and a job are available, they are taken from the queues and the job_id is sent to the worker
via MQTT. After the worker finishes its job, it is put back into the queue.
'''
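# Example busy payloads, as implied by busy_changed_callback below
# (illustrative only -- the exact MQTT schema is not documented here):
#   {"busy": "job-42", "job_id": "job-42", "status": "running"}  -> worker took a job
#   {"busy": false, "job_id": "job-42", "status": "done"}        -> worker finished
#   ""  (empty payload)                                          -> worker lost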
class WorkerControl:
config_queue = queue.Queue(-1) # infinite size
COMMAND_START = "start"
COMMAND_STOP = "stop"
commandIO = None
storageIO: StorageIO = None
worker_list = {} # "worker_id" : "job_id"
worker_job_mapping = {}
worker_queue = WQ()
def get_worker_info(self):
return self.worker_list
# Function called by external Thread !!!
def busy_changed_callback(self, worker_id, busy_message):
try:
if len(busy_message) == 0:
print("Worker LOST: " + worker_id)
self.worker_queue.remove_worker(worker_id)
self.worker_list.pop(worker_id, None)
                if worker_id not in self.worker_job_mapping:
print("Unknown worker reported busy change! This should not happen")
else:
self.update_status(worker_id, "lost")
else:
message = json.loads(busy_message)
is_busy = message["busy"] # either False or the job_id
self.worker_list[worker_id] = is_busy
if is_busy == False:
if "job_id" in message:
self.update_status(worker_id, message["status"])
if worker_id in self.worker_job_mapping:
del self.worker_job_mapping[worker_id]
self.worker_queue.add_to_queue(worker_id)
else:
job_id = message["job_id"]
self.worker_queue.remove_worker(worker_id)
self.worker_job_mapping[worker_id] = job_id
self.update_status(worker_id, message["status"])
print("Worker is busy: " + worker_id)
except Exception as e:
print("An error occurred in MQTT callback: " + str(e))
def update_status(self, worker_id: str, status: str):
        if worker_id not in self.worker_job_mapping:
print("ERROR. Tried to set status for unset worker!")
else:
self.storageIO.update_job_status(self.worker_job_mapping[worker_id], status)
def __init__(self, commandIO, storageIO: StorageIO):
self.commandIO = commandIO
self.storageIO = storageIO
self.commandIO.on_busy_changed(self.busy_changed_callback)
def modify_job_state(self, job_list, command: str):
for job in job_list:
config = {"job_id": job}
if command == self.COMMAND_START:
self.create_new_job(config)
else:
pass
# Function called by external Thread !!!
def create_new_job(self, job_config: dict):
try:
print("-> Job ready (ID=" + job_config["job_id"] + ")")
self.config_queue.put(job_config, timeout=1)
except:
return False
return True
def run(self):
        while True:
jsonConfig = self.config_queue.get()
job_id = jsonConfig["job_id"]
print("<- Job selected (ID=" + job_id + ")")
ready_worker = self.worker_queue.get_next_worker()
print("Starting new job (id: " + job_id + ")")
self.commandIO.start_new_job(ready_worker, json.dumps(jsonConfig))
if ready_worker in self.worker_job_mapping:
print("Removing orphaned job from worker job mapping")
del self.worker_job_mapping[ready_worker]
self.worker_job_mapping[ready_worker] = job_id
self.update_status(ready_worker, "assigned")
| 38.226415 | 112 | 0.607601 |