# coding=utf-8
# Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of League client variable values.
Some of Rito's API methods return description strings that have variables.
Unfortunately, Rito habitually does not make variable values accessible through
the API. Instead, the substitution table lives in the League client and we chose
to copy them here.
"""
from __future__ import unicode_literals
REFORGED_RUNE_VARS = {
'SummonAery': {
'@DamageBase@': '15',
'@DamageMax@': '40',
'@DamageAPRatio.-1@': '0.1',
'@DamageADRatio.-1@': '0.15',
'@ShieldBase@': '30',
'@ShieldMax@': '80',
'@ShieldRatio.-1@': '0.25',
'@ShieldRatioAD.-1@': '0.4',
},
'ArcaneComet': {
'@DamageBase@': '30',
'@DamageMax@': '100',
'@APRatio.-1@': '0.2',
'@ADRatio.-1@': '0.35',
'@RechargeTime@': '20',
'@RechargeTimeMin@': '8',
'@PercentRefund*100@': '20',
'@AoEPercentRefund*100@': '10',
'@DotPercentRefund*100@': '5',
},
'PhaseRush': {
'@Window@': '3',
'@HasteBase*100@': '15',
'@HasteMax*100@': '40',
'@SlowResist*100@': '75',
'@Duration@': '3',
'@Cooldown@': '15',
},
# TODO: Fill in the rest of these.
}
"""
Structure for parsed as dict request or response syntax values.
"""
from typing import Any, Dict, List, Optional
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.logger import get_logger
from mypy_boto3_builder.service_name import ServiceName
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
from mypy_boto3_builder.type_annotations.type import Type
from mypy_boto3_builder.type_annotations.type_annotation import TypeAnnotation
from mypy_boto3_builder.type_annotations.type_literal import TypeLiteral
from mypy_boto3_builder.type_annotations.type_subscript import TypeSubscript
from mypy_boto3_builder.type_annotations.type_typed_dict import TypeTypedDict
from mypy_boto3_builder.type_maps.shape_type_map import get_shape_type_stub
from mypy_boto3_builder.type_maps.syntax_type_map import SYNTAX_TYPE_MAP
class TypeValue:
"""
Structure for parsed as dict request or response syntax values.
"""
def __init__(self, service_name: ServiceName, prefix: str, value: Dict[str, Any]) -> None:
self.service_name = service_name
self.logger = get_logger()
self.prefix = prefix
self.raw: Dict[str, Any] = value
self.dict_items: Optional[List[Dict[str, Any]]] = value.get("dict_items")
if value.get("empty_dict"):
self.dict_items = []
self.set_items: Optional[List[Any]] = value.get("set_items")
self.list_items: Optional[List[Any]] = value.get("list_items")
if value.get("empty_list"):
self.list_items = []
self.func_call: Optional[Dict[str, Any]] = value.get("func_call")
self.union_items: List[Any] = []
if value.get("union_first_item"):
self.union_items.append(value["union_first_item"])
self.union_items.extend(value["union_rest_items"])
self.literal_items: List[Any] = []
if value.get("literal_first_item"):
self.literal_items.append(value["literal_first_item"])
self.literal_items.extend(value["literal_rest_items"])
self.value: Optional[str] = value.get("value")
def is_dict(self) -> bool:
"""
Whether value is Dict.
"""
return self.dict_items is not None
def is_list(self) -> bool:
"""
Whether value is List.
"""
return self.list_items is not None
def is_literal(self) -> bool:
"""
Whether value is Literal.
"""
return bool(self.literal_items)
def is_set(self) -> bool:
"""
Whether value is Set.
"""
return bool(self.set_items)
def is_union(self) -> bool:
"""
Whether value is Union.
"""
return bool(self.union_items)
def is_func_call(self) -> bool:
"""
Whether value is Callable.
"""
return bool(self.func_call)
def is_plain(self) -> bool:
"""
Whether value is not None.
"""
return self.value is not None
def _get_type_dict(self) -> FakeAnnotation:
if not self.dict_items:
return Type.DictStrAny
first_key = self.dict_items[0]["key"]
if first_key in SYNTAX_TYPE_MAP:
result = TypeSubscript(Type.Dict)
result.add_child(SYNTAX_TYPE_MAP[first_key])
result.add_child(
TypeValue(self.service_name, self.prefix, self.dict_items[0]["value"]).get_type()
)
return result
typed_dict_name = f"{self.prefix}TypeDef"
shape_type_stub = get_shape_type_stub(self.service_name, typed_dict_name)
if shape_type_stub:
return shape_type_stub
typed_dict = TypeTypedDict(typed_dict_name)
for item in self.dict_items:
key_name = self._parse_constant(item["key"])
prefix = f"{self.prefix}{key_name}"
typed_dict.add_attribute(
key_name,
TypeValue(self.service_name, prefix, item["value"]).get_type(),
required=False,
)
return typed_dict
def _get_type_list(self) -> TypeSubscript:
if not self.list_items:
return TypeSubscript(Type.List, [Type.Any])
result = TypeSubscript(Type.List)
for item_index, item in enumerate(self.list_items):
prefix = f"{self.prefix}{item_index if item_index else ''}"
result.add_child(TypeValue(self.service_name, prefix, item).get_type())
return result
def _get_type_union(self) -> FakeAnnotation:
if not self.union_items:
return Type.Any
result = TypeSubscript(Type.Union)
for item_index, item in enumerate(self.union_items):
prefix = f"{self.prefix}{item_index if item_index else ''}"
result.add_child(TypeValue(self.service_name, prefix, item).get_type())
if all(i is result.children[0] for i in result.children):
return result.children[0]
return result
def _get_type_set(self) -> TypeAnnotation:
if not self.set_items:
return Type.Any
plain_values = [i["value"] for i in self.set_items]
if plain_values == ["'... recursive ...'"]:
return Type.Any
self.logger.warning(f"Unknown set: {self.raw}, fallback to Any")
return Type.Any
def _get_type_func_call(self) -> FakeAnnotation:
if not self.func_call:
raise ValueError(f"Value is not a func call: {self.raw}")
if self.func_call["name"] == "datetime":
return ExternalImport(ImportString("datetime"), "datetime")
if self.func_call["name"] == "StreamingBody":
return ExternalImport(ImportString("botocore", "response"), "StreamingBody")
if self.func_call["name"] == "EventStream":
return ExternalImport(ImportString("botocore", "eventstream"), "EventStream")
self.logger.warning(f"Unknown function: {self.raw}, fallback to Any")
return Type.Any
def _get_type_plain(self) -> FakeAnnotation:
if not self.value or isinstance(self.value, dict):
raise ValueError(f"Value is not plain: {self.raw}")
if self.value in SYNTAX_TYPE_MAP:
return SYNTAX_TYPE_MAP[self.value]
if self.value.startswith("'"):
return Type.str
self.logger.warning(f"Unknown plain value: {self.raw}, fallback to Any")
return Type.Any
def is_literal_item(self) -> bool:
"""
Whether value is Literal item.
"""
if self.value is None:
return False
return self.value.startswith("'")
def _get_type_literal(self) -> FakeAnnotation:
if not self.literal_items:
raise ValueError(f"Value is not literal: {self.raw}")
items = [TypeValue(self.service_name, self.prefix, i) for i in self.literal_items]
if all(i.is_literal_item() for i in items):
item_constants = [self._parse_constant(i.value or "") for i in items]
return TypeLiteral(f"{self.prefix}Type", item_constants)
item_types = [i.get_type() for i in items]
if all([i is item_types[0] for i in item_types]):
return item_types[0]
return TypeSubscript(Type.Union, item_types)
@staticmethod
def _parse_constant(value: str) -> Any:
if value.startswith("'"):
return value.replace("'", "")
if value.isdigit():
return int(value)
raise ValueError(f"Invalid constant: {value}")
def get_type(self) -> FakeAnnotation:
"""
Get value type.
"""
if self.is_list():
return self._get_type_list()
if self.is_dict():
return self._get_type_dict()
if self.is_set():
return self._get_type_set()
if self.is_func_call():
return self._get_type_func_call()
if self.is_union():
return self._get_type_union()
if self.is_literal():
return self._get_type_literal()
if self.is_plain():
return self._get_type_plain()
raise ValueError(f"Unknown value: {self.raw}")
34.987395
97
0.630839
793f58be9da27b680ff8fd87f827a7e47f61d389
5,181
py
Python
ctpbee/json/pollen.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[
"MIT"
]
null
null
null
ctpbee/json/pollen.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[
"MIT"
]
null
null
null
ctpbee/json/pollen.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[
"MIT"
]
3
2019-11-21T03:38:14.000Z
2022-02-14T08:09:11.000Z
import json
from collections import defaultdict
class ProxyPollen(object):
"""
+-------------------+---------------+--------------------+
| Python | JSON |Pollen(Can change |
+===================+===============+====================+
| dict | object |cls:Data,Request |
+-------------------+---------------+--------------------+
| list, tuple,set | array | |
+-------------------+---------------+--------------------+
| str | string | Enum |
+-------------------+---------------+--------------------+
| int, float | number | |
+-------------------+---------------+--------------------+
| True | true | |
+-------------------+---------------+--------------------+
| False | false | |
+-------------------+---------------+--------------------+
| None | null | |
+-------------------+---------------+--------------------+
|Datetime | str(Datetime) | Datetime |
+-------------------+---------------+--------------------+
"""
"""
str_can_to:用于筛选str转python类型时的tag类,存在str_tags
default_tags:所有tag实例
str_tags: ""
enum_store:自定义Enum仓库
data_class_store:自定义Data类仓库[BaseDataClass,BaseRequestClass]
data_base_class: BaseData
request_base_class: BaseRequest
"""
str_can_to = ['enum', 'datetime']
default_tags = dict()
str_tags = dict()
enum_store = dict()
data_class_store = defaultdict(set)
data_base_class = None
request_base_class = None
def __init__(self, tags: list = None, enums: list = None, data_class=None, request_class=None):
if tags: self.labeling(tags)
if enums: self.add_enum(enums)
if data_class: self.add_data_class(data_class)
if request_class: self.add_request_class(request_class)
self._init_class_store()
def _init_class_store(self):
# 初始化 data_class_store
data = data_class + request_class
for cls in data:
cls_name = cls.__name__
attribute = set()
for c in cls.__dict__['__annotations__']:
if c.startswith("__") or c.startswith("create"):
continue
attribute.add(c)
self.data_class_store[cls] = attribute
def labeling(self, tags: list):
"""
添加tag类
:param tags:
:return:
"""
if not isinstance(tags, list): raise TypeError("[^^]tags must list")
for t in tags:
self.default_tags[t.tag] = t(self)
if t.tag in self.str_can_to:
self.str_tags[t.tag] = t(self)
def add_enum(self, enums: list):
"""
添加自定义Enum类属性值
:param enums:
:return:
"""
if not isinstance(enums, list): raise TypeError("[^^]enums must list")
for e in enums:
for _, v in e.__members__.items():
self.enum_store[v.value] = v
def add_data_class(self, data_class: list):
"""
{cls_name:{attr1,attr2},} 模糊获取类变量属性
:param data_class:
:return:
"""
if not isinstance(data_class, list): raise TypeError("[^^]data_class must list")
self.data_base_class = data_class[0].__bases__
def add_request_class(self, request_class: list):
"""
{cls_name:{attr1,attr2},} 模糊获取类变量属性
:param request_class:
:return:
"""
if not isinstance(request_class, list): raise TypeError("[^^]request_class must list")
self.request_base_class = request_class[0].__bases__
def update_data_class_store(self, data):
"""
在dumps时将类实例的全部属性覆盖模糊获取的属性,提高精确性
:param data: Dataclass或RequestClass实例
:return:
"""
cls_name = data.__class__.__name__
for c in list(self.data_class_store.keys()):
if c.__name__ == cls_name:
self.data_class_store[c] = set(data._to_dict().keys())
@classmethod
def find_tag(cls, value):
"""
:param value:
:return:
"""
for t in cls.default_tags.values():
if t.check(value):
return t
@classmethod
def loads(cls, json_data):
"""
to python
:param value:
:return:
"""
if isinstance(json_data, str):
json_data = json.loads(json_data)
tag = cls.find_tag(json_data)
if tag:
return tag.to_pollen(json_data)
@classmethod
def dumps(cls, value):
"""
to json
:param value:
:return:
"""
tag = cls.find_tag(value)
if tag:
return json.dumps(tag.to_json(value), ensure_ascii=False)
from .tag import tags
from ctpbee.constant import enums, data_class, request_class
Pollen = ProxyPollen(tags=tags, enums=enums, data_class=data_class, request_class=request_class)
33.425806
99
0.47597
793f5a0c237e6621305c613c3fe49be6382c6e0a
4,170
py
Python
ansible/modules/windows/win_command.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
]
1
2022-01-25T22:52:58.000Z
2022-01-25T22:52:58.000Z
ansible/modules/windows/win_command.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
]
null
null
null
ansible/modules/windows/win_command.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
]
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- the C(win_command) module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
creates:
description:
- a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
removes:
description:
- a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
chdir:
description:
- set the specified path as the current working directory before executing a command
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
- For non-Windows targets, use the M(command) module instead.
author:
- Matt Davis
'''
EXAMPLES = r'''
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
33.36
153
0.694724
793f5a42d0e99ced330446a62cb47cb9233fa4e8
203
py
Python
couscous/__main__.py
sthagen/improved-couscous
797807f5c4a834f60c8a7d61b7477df46cd775aa
[
"MIT"
]
1
2021-03-07T11:08:57.000Z
2021-03-07T11:08:57.000Z
couscous/__main__.py
sthagen/improved-couscous
797807f5c4a834f60c8a7d61b7477df46cd775aa
[
"MIT"
]
25
2021-03-02T21:14:54.000Z
2021-03-02T22:00:30.000Z
couscous/__main__.py
sthagen/improved-couscous
797807f5c4a834f60c8a7d61b7477df46cd775aa
[
"MIT"
]
null
null
null
# -*- coding: utf-8 -*-
# pylint: disable=expression-not-assigned,line-too-long
import sys
from couscous.cli import main
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) # pragma: no cover
22.555556
55
0.684729
793f5dddd22364d937f649667f226d2f0741584b
2,655
py
Python
Commands/Log.py
Heufneutje/PyMoronBot
055abf0e685f3d2fc02863517952dc7fad9050f3
[
"MIT"
]
null
null
null
Commands/Log.py
Heufneutje/PyMoronBot
055abf0e685f3d2fc02863517952dc7fad9050f3
[
"MIT"
]
null
null
null
Commands/Log.py
Heufneutje/PyMoronBot
055abf0e685f3d2fc02863517952dc7fad9050f3
[
"MIT"
]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on May 11, 2014
@author: Tyranic-Moron
"""
import datetime
import codecs
import os
from moronbot import cmdArgs
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
import GlobalVars
logFuncs = {
'PRIVMSG': lambda m: u'<{0}> {1}'.format(m.User.Name, m.MessageString),
'ACTION': lambda m: u'*{0} {1}*'.format(m.User.Name, m.MessageString),
'NOTICE': lambda m: u'[{0}] {1}'.format(m.User.Name, m.MessageString),
'JOIN': lambda m: u' >> {0} ({1}@{2}) joined {3}'.format(m.User.Name, m.User.User, m.User.Hostmask, m.ReplyTo),
'NICK': lambda m: u'{0} is now known as {1}'.format(m.User.Name, m.MessageString),
'PART': lambda m: u' << {0} ({1}@{2}) left {3}{4}'.format(m.User.Name, m.User.User, m.User.Hostmask, m.ReplyTo, m.MessageString),
'QUIT': lambda m: u' << {0} ({1}@{2}) quit{3}'.format(m.User.Name, m.User.User, m.User.Hostmask, m.MessageString),
'KICK': lambda m: u'!<< {0} was kicked by {1}{2}'.format(m.Kickee, m.User.Name, m.MessageString),
'TOPIC': lambda m: u'# {0} set the topic to: {1}'.format(m.User.Name, m.MessageString),
'MODE': lambda m: u'# {0} sets mode: {1}{2} {3}'.format(m.User.Name, m.ModeOperator, m.Modes, ' '.join(m.ModeArgs))
}
def log(text, target):
now = datetime.datetime.utcnow()
time = now.strftime("[%H:%M]")
data = u'{0} {1}'.format(time, text)
print target, data
fileName = "{0}{1}.txt".format(target, now.strftime("-%Y%m%d"))
fileDirs = os.path.join(GlobalVars.logPath, cmdArgs.server)
if not os.path.exists(fileDirs):
os.makedirs(fileDirs)
filePath = os.path.join(fileDirs, fileName)
with codecs.open(filePath, 'a+', 'utf-8') as f:
f.write(data + '\n')
class Log(CommandInterface):
triggers = []#['log']
help = "Logs {} messages.".format("/".join(logFuncs.keys()))#"log (-n / yyyy-mm-dd) - " \
#"without parameters, links to today's log. " \
#"-n links to the log n days ago. " \
#"yyyy-mm-dd links to the log for the specified date"
priority = -1
def shouldExecute(self, message):
"""
@type message: IRCMessage
"""
return True
def execute(self, message):
"""
@type message: IRCMessage
"""
if message.Type in logFuncs:
logString = logFuncs[message.Type](message)
log(logString, message.ReplyTo)
if message.Type in self.acceptedTypes and message.Command in self.triggers:
# log linking things
super(Log, self).execute(message)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class CMYKColors(str, Enum):
cyan = "cyan"
magenta = "Magenta"
yellow = "YELLOW"
blac_k = "blacK"
class GoblinSharkColor(str, Enum):
pink = "pink"
gray = "gray"
brown = "brown"
class MyKind(str, Enum):
kind1 = "Kind1"
22.878788
76
0.54702
793f5f4d80b6062d18b2c3630e41d33e2a9a5abf
8,039
py
Python
cogs/general/meta.py
Mystic-Alchemy/Vale.py
b4cc964d34672444c65e2801a15f37d774c5e6e3
[
"MIT"
]
1
2018-10-13T17:58:58.000Z
2018-10-13T17:58:58.000Z
cogs/general/meta.py
Mystic-Alchemy/Vale.py
b4cc964d34672444c65e2801a15f37d774c5e6e3
[
"MIT"
]
null
null
null
cogs/general/meta.py
Mystic-Alchemy/Vale.py
b4cc964d34672444c65e2801a15f37d774c5e6e3
[
"MIT"
]
null
null
null
import inspect
import os
import platform
import re
import discord
import psutil
from discord.ext import commands
from utils.colors import random_color
from utils.converter import BotCommand
from utils.paginator import Paginator
# Thanks, Milky
VERSION_HEADER_PATTERN = re.compile(r'^## (\d+\.\d+\.\d+) - (\d{4}-\d{2}-\d{2}|Unreleased)$')
CHANGE_TYPE_PATTERN = re.compile(r'^### (Added|Changed|Deprecated|Removed|Fixed|Security)$')
def _is_bulleted(line):
return line.startswith(('* ', '- '))
def _changelog_versions(lines):
version = change_type = release_date = None
changes = {}
for line in lines:
line = line.strip()
if not line:
continue
match = VERSION_HEADER_PATTERN.match(line)
if match:
if version:
yield version, {'release_date': release_date, 'changes': changes.copy()}
version = match[1]
release_date = match[2]
changes.clear()
continue
match = CHANGE_TYPE_PATTERN.match(line)
if match:
change_type = match[1]
continue
if _is_bulleted(line):
changes.setdefault(change_type, []).append(line)
else:
changes[change_type][-1] += ' ' + line.lstrip()
yield version, {'release_date': release_date, 'changes': changes.copy()}
def _load_changes():
with open('CHANGELOG.md') as f:
return dict(_changelog_versions(f.readlines()))
_CHANGELOG = _load_changes()
def _format_line(line):
if _is_bulleted(line):
return '\u2022 ' + line[2:]
return line
def _format_changelog_without_embed(version):
changes = _CHANGELOG[version]
nl_join = '\n'.join
change_lines = '\n\n'.join(
f'{type_}\n{nl_join(map(_format_line, lines))}'
for type_, lines in changes['changes'].items()
)
return f'Version {version} \u2014 {changes["release_date"]}\n\n{change_lines}'
def _format_changelog_with_embed(version, url):
changes = _CHANGELOG[version]
nl_join = '\n'.join
change_lines = '\n\n'.join(
f'**__{type_}__**\n{nl_join(map(_format_line, lines))}'
for type_, lines in changes['changes'].items()
)
embed = discord.Embed(description=change_lines)
name = f'Version {version} \u2014 {changes["release_date"]}'
embed.set_author(name=name, url=url)
return embed
class Meta:
"""Primary a class that provides some meta information about the bot."""
def __init__(self, bot):
self.bot = bot
@property
def emojis(self):
return self.bot.bot_emojis
@staticmethod
async def _get_commits(repo):
cmd = r'git show -s HEAD~5..HEAD --format="[{}](https://github.com/' + repo + '/commit/%H) %s (%cr)"' # 10 commits
if os.name == 'posix':
cmd = cmd.format(r'\`%h\`')
else:
cmd = cmd.format(r'`%h`')
try:
revision = os.popen(cmd).read().strip()
except OSError:
revision = 'Couldn\'t fetch commits. Either a memory error or a non-existant repository was provided.'
return revision
@staticmethod
def _get_os_information(cpu, memory):
return inspect.cleandoc(f"""
**System information:**
```yaml
:Architecture: -{platform.architecture()[0]}-
:System: -{platform.system()}-
:Node: -{platform.node()}-
:Release: -{platform.release()}-
:Version: -{platform.version()}-
:Machine: -{platform.version()}-
:Processor: -{platform.processor()}-
:CPU usage: -{cpu}-
:Memory usage: -{memory}-
```
""")
@commands.command(name='about')
async def _about(self, ctx):
"""Get some cool information about the bot."""
pages = []
process = self.bot.process
cpu = process.cpu_percent() / psutil.cpu_count()
memory = process.memory_info().rss / float(2 ** 20)
latency = round(self.bot.latency * 1000, 2)
shards = len(self.bot.shards)
version = '.'.join(map(str, ctx.bot.version_info[:3]))
changelog = (
f'**{self.emojis.get("announcements")} Recent updates:**\n\n'
f'```css\n{_format_changelog_without_embed(version)}```'
)
commits = await self._get_commits('itsVale/Vale.py')
system = self._get_os_information(cpu, memory)
python = platform.python_version()
postgres = '.'.join(map(str, ctx.db.get_server_version()[:3]))
pages = [
(
f'[`Source Code`]({self.bot.source})\n'
f'[`Invite me with minimal perms`]({self.bot.minimal_invite_url})\n'
f'[`Invite me with full perms (Required for certain commands to work)`]({self.bot.invite_url})\n\n'
f'[__**Need help with something? Check out the support server!**__]({self.bot.support_server})'
),
(
f'{self.emojis.get("version")} Version: **{version}**\n'
f'{self.emojis.get("status")} Online for: **{self.bot.uptime}**\n'
f'{self.emojis.get("signal")} Latency: **{latency} ms**\n'
f'{self.emojis.get("server")} Guilds: **{self.bot.guild_count}**\n'
f'{self.emojis.get("cpu")} CPU usage: **{cpu:.2f}%**\n'
f'{self.emojis.get("memory")} RAM usage: **{memory:.2f} Mb**\n'
f'{self.emojis.get("shard")} Shards: **{shards}**\n'
f'{self.emojis.get("python")} Python version: **{python}**\n'
f'{self.emojis.get("discordpy")} discord.py version: **{discord.__version__}**\n'
f'{self.emojis.get("postgres")} PostgreSQL version: **{postgres}**\n'
),
system,
f'**\N{WRITING HAND} Latest commits:**\n\n' + commits,
changelog
]
paginator = Paginator(ctx, pages, per_page=1, title=f'{self.emojis.get("statistics")} Stats for Vale.py')
await paginator.interact()
@commands.command(name='source', aliases=['skid', 'steal'])
async def _source(self, ctx, *, command: BotCommand = None):
"""Displays the source code for a command.
If the source code has too many lines, it will send a GitHub URL instead.
"""
if not command:
return await ctx.send(self.bot.source)
paginator = commands.Paginator(prefix='```py')
source = command.callback.__code__
lines, firstlineno = inspect.getsourcelines(command.callback)
if len(lines) < 20:
for line in lines:
paginator.add_line(line.rstrip().replace('`', '\u200b'))
for page in paginator.pages:
await ctx.send(page)
return
lastline = firstlineno + len(lines) - 1
location = os.path.relpath(source.co_filename).replace('\\', '/')
url = f'<{self.bot.source}/tree/master/{location}#L{firstlineno}-L{lastline}>'
await ctx.send(url)
@commands.command(name='stats')
async def _stats(self, ctx):
"""Shows some usage statistics about this bot."""
content = (
f'__**Usage statistics:**__\n',
f'Commands invoked in total: **{self.bot.command_counter.get("total")}**',
f'Commands invoked in this guild: **{self.bot.command_counter.get(str(ctx.guild.id))}**',
f'Commands invoked in DMs: **{self.bot.command_counter.get("in DMs")}**\n',
f'And here are the commands, which were invoked successfully in total: **{self.bot.command_counter.get("succeeded")}**\n',
f'*Only applies to the period from since the bot was restarted for the last time until now.*',
)
if ctx.bot_has_embed_links():
await ctx.send(embed=discord.Embed(description='\n'.join(content), color=random_color()))
else:
await ctx.send('\n'.join(content))
def setup(bot):
bot.add_cog(Meta(bot))
# coding: utf-8
"""
Xero Payroll AU
This is the Xero Payroll API for orgs in Australia region. # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from enum import Enum
class DeductionTypeCalculationType(Enum):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
allowed enum values
"""
FIXEDAMOUNT = "FIXEDAMOUNT"
PRETAX = "PRETAX"
POSTTAX = "POSTTAX"
18.166667
76
0.67156
793f60e8206d42961b695a224825f440d0855852
3,154
py
Python
grp_modules/util/log/base/util.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[
"MIT"
]
null
null
null
grp_modules/util/log/base/util.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[
"MIT"
]
null
null
null
grp_modules/util/log/base/util.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[
"MIT"
]
null
null
null
import datetime
from os.path import basename
from html import escape
import git
from colored import fg, attr
def datestr_sort():
return datetime.datetime.now().strftime('%y%m%d-%H%M%S')
def datestr():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def get_git_revisions():
# check repo for SHA and diffs
repo = git.Repo(search_parent_directories=True)
name = basename(repo.working_dir)
sha = [repo.head.object.hexsha]
diffs = [repo.git.diff('HEAD')]
modules = [name]
# check also submodules for SHAs and diffs
if len(repo.submodules) > 0:
modules += [s.name for s in repo.submodules]
sha += [s.hexsha for s in repo.submodules]
diffs += [s.module().git.diff('HEAD') for s in repo.submodules]
return modules, sha, diffs
def training_header(state):
gpu = ("gpu" + str(state.all["gpu"][0]) if len(state.all["gpu"]) == 1 else "multigpu(%s)" % ",".join(str(g) for g in state.all["gpu"])) if "gpu" in state.all else "cpu"
s = [" ", "Experiment", state["tag"], "on", gpu, " "]
seed_mode = "seed: %s " % state["seed"] if "seed" in state and state["seed"] >= 0 else "random mode"
bar = "—" * len(" ".join(s)) # pylint: disable=blacklisted-name
s[1] = s[1]
s[2] = fg('red') + attr('bold') + s[2] + attr('reset')
s[3] = attr('dim') + s[3] + attr('reset')
s[4] = fg('red') + attr('bold') + s[4] + attr('reset')
print()
print(" ╭" + bar + "╮")
print(" │" + " ".join(s) + "│", attr('dim') + seed_mode + attr('reset'))
print(" ╰" + bar + "╯")
if "record" in state and state["record"]:
print(fg('red') + " Recording Log-Calls" + attr('reset'))
def html_summary(state, event):
html_repostate = "<ul style='list-style: circle'>" + ("".join("<li style='margin:0 3em;'>%s:%s:<code>%s</code></li>" % (name, "clean" if len(diff) == 0 else "<b>diverged</b>", sha[:7]) for (name, diff, sha) in state["repository_state"])) + "</ul>"
html_loaded_modules = "<ul style='list-style: circle'>" + ("".join("<li style='margin:0 3em;'>%s</li>" % s for s in state["loaded_modules"])) + "</ul>"
html_env = "<ul style='list-style: circle'>" + ("".join("<li style='margin:0 3em;'>%s: <code>%s</code></li>" % (name, ver) for (name, ver) in [("python", state["python"]), ("pytorch", state["pytorch"])])) + "</ul>"
html_prepend = """
<h1>Experiment on %s</h1>
<h1 style="font-size:120%%; margin-top: -0.25em;">%s</h1>
<b>Repository Status:</b></br> %s </br></br>
<b>CLI-Call:</b></br> <code><pre>%s</pre></code> </br></br>
<b>Loaded Modules:</b></br> %s </br></br>
<b>Environment:</b></br> %s </br></br>
""" % (state["date"], state["tag"], html_repostate, state["cli_overwrites"], html_loaded_modules, html_env)
html_diffs = "\n".join("""
<h1>Repository Diffs</h1>
<b><b>%s</b>:</b></br> <code><pre>%s</pre></code> </br></br>
""" % (module, escape(diff)) for module, diff, sha in state["repository_state"])
html_settings = html_prepend + "".join(event.settings_html())
return html_settings, html_diffs
def plot_every(state, steps):
return steps and state["main.current_batch"] % steps == 0
39.425
251
0.58941
793f619a7e8698942f2ce687ceec9b46d4464fe6
22,609
py
Python
openbot.py
n0tpetya/discordbot
dfbcc50b5c37fb2acaefe566bd93fc9980e214dc
[
"CC0-1.0"
]
null
null
null
openbot.py
n0tpetya/discordbot
dfbcc50b5c37fb2acaefe566bd93fc9980e214dc
[
"CC0-1.0"
]
1
2021-02-21T13:10:23.000Z
2021-02-21T13:10:23.000Z
openbot.py
n0tpetya/discordbot
dfbcc50b5c37fb2acaefe566bd93fc9980e214dc
[
"CC0-1.0"
]
1
2021-06-03T13:49:22.000Z
2021-06-03T13:49:22.000Z
import asyncio
import random
import discord
from discord import Member, Guild, User
from discord import Profile
from datetime import datetime
client = discord.Client(intents=discord.Intents.all())
antworten = ['Ja', 'Nein', 'Wahrscheinlich', 'Unwahrscheinlich', 'Vielleicht', 'Sehr wahrscheinlich',
'Sehr unwarscheinlich']
beleidigungen = []
uhrzeit = datetime.now().strftime('%H:%M')
status = ['Drinking coffee☕️', 'Eating something🧁', 'Playing Minecraft🎮', 'Playing CS:GO🎮', 'Playing GTA V🎮', 'Playing Rocket League🎮', 'Vibing🎷', 'Doing work👨🏼🔧',
'Meeting friends👨👨👦', 'Listening to music🎧', 'On the phone📞', 'Writing with friends📱', 'On a party🎭', 'Going out👫']
def is_not_pinned(cmess):
return not cmess.pinned
@client.event # Start
async def on_ready():
print('Eingeloggt als {}'.format(client.user.name)) # Startup succes MSG
print(uhrzeit)
client.loop.create_task(status_task())
async def status_task(): # Schleife die Status des Bots ändert
while True:
await client.change_presence(activity=discord.Game('Status 1'), status=discord.Status.online)
await asyncio.sleep(5)
await client.change_presence(activity=discord.Game('Status 2'),
status=discord.Status.online)
await asyncio.sleep(5)
await client.change_presence(activity=discord.Game('{}'.format(random.choice(status))), status=discord.Status.online)
await asyncio.sleep(5)
@client.event # Befehle
async def on_message(message):
if message.author.bot:
return
# Hilfe-Liste
if message.content.startswith(".help"):
embedhelp = discord.Embed(title='Bot-Commands',
description='',
color=0x04ff00)
embedhelp.add_field(name='.help', value='Zeigt dir diese Liste an',
inline=False)
embedhelp.add_field(name='!oracle <Frage>', value='Gibt dir die Antwort auf deine Frage',
inline=False)
embedhelp.add_field(name='!uinfo <User>', value='Zeigt Informationen über einen Nutzer',
inline=False)
embedhelp.add_field(name='!forum', value='Zeigt dir den Link zur Webseite',
inline=False)
embedhelp.add_field(name='!youtube', value='Zeigt dir den Link zu unserem YouTube Channel',
inline=False)
embedhelp.add_field(name='!support', value='Zeigt dir Support Möglichkeiten an',
inline=False)
embedhelp.add_field(name='!ticket', value='Du kannst damit bei Problemen ein Ticket erstellen und mit den Admins in Kontakt treten.')
embedhelp.add_field(name='Bot erstellt von', value='Game-Forum.net | Deine Gaming Community!')
embedhelp.set_footer(text='Text')
await message.channel.send(embed=embedhelp)
# Nutzerinfos
if message.content.startswith('!uinfo'):
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embed = discord.Embed(title='Userinfo für {}'.format(member.name),
description='Informationen über: {}'.format(member.mention),
color=0x04ff00)
embed.add_field(name='Server beigetreten',
value=member.joined_at.strftime('%d. %m. %Y um %H:%M:%S Uhr'),
inline=True)
embed.add_field(name='Discord beigetreten',
value=member.created_at.strftime('%d. %m. %Y um %H:%M:%S Uhr'),
inline=True)
rollen = ''
for role in member.roles:
if not role.is_default():
rollen += '{} \r\n'.format(role.mention)
if rollen:
embed.add_field(name='Rollen: ', value=rollen, inline=True)
embed.add_field(name='Bewertung', value=('Gebe gerne eine Bewertung zu {} ab!'.format(member.mention)),
inline=False)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text='Text')
react = await message.channel.send(embed=embed)
await react.add_reaction('👍')
await react.add_reaction('👎')
else:
await message.channel.send("Der Nutzer muss auf dem Discord sein!")
else:
await message.channel.send("Bitte gib einen Nutzernamen an!")
# Links
if message.content.startswith('!forum'):
embed2 = discord.Embed(title='Forum-Adresse',
description='Link',
color=0xfffb00)
embed2.set_footer(text='Game-Forum.net Discord Bot')
await message.channel.send(embed=embed2)
# Support
if message.content.startswith('!support'):
embed3 = discord.Embed(title='Support Möglichkeiten',
description='Möglichkeiten um Support zu erhalten',
color=0xfffb00)
embed3.add_field(name='Forum Support', value='Text',
inline=True)
embed3.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/support.png')
embed3.set_footer(text='Text')
await message.channel.send(embed=embed3)
# Team-join-leave-changename
# Join
if message.content.startswith('!jointeam') and message.author.permissions_in(message.channel).send_tts_messages:
args = message.content.split(' ')
if len(args) >= 3:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
teammsg1 = ' '.join(args[2:])
await message.channel.purge(limit=1, check=is_not_pinned)
embedjoin = discord.Embed(title='Team-Beitritt/Promotion',
description='Jemand ist dem Team beigetreten oder wurde befördert!',
color=0x22ff00)
embedjoin.add_field(name='Änderung', value='**{}**'.format(teammsg1),
inline=False)
embedjoin.set_thumbnail(url=member.avatar_url)
embedjoin.set_footer(text='Text')
await message.channel.send(embed=embedjoin)
# Leave
if message.content.startswith('!leaveteam') and message.author.permissions_in(message.channel).send_tts_messages:
args = message.content.split(' ')
if len(args) >= 3:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
teammsg2 = ' '.join(args[2:])
await message.channel.purge(limit=1, check=is_not_pinned)
embedleave = discord.Embed(title='Team-Leave/Degradierung',
description='Jemand hat das Team verlassen oder wurde degradiert!',
color=0xff0000)
embedleave.add_field(name='Änderung', value='**{}**'.format(teammsg2),
inline=False)
embedleave.set_thumbnail(url=member.avatar_url)
embedleave.set_footer(text='Text')
await message.channel.send(embed=embedleave)
# NameChange
if message.content.startswith('!nameteam') and message.author.permissions_in(message.channel).send_tts_messages:
args = message.content.split(' ')
if len(args) >= 3:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
teammsg3 = ' '.join(args[2:])
await message.channel.purge(limit=1, check=is_not_pinned)
embedchange = discord.Embed(title='Namensänderung',
description='Jemand hat seinen Namen geändert.',
color=0xfbff00)
embedchange.add_field(name='Änderung', value='**{}**'.format(teammsg3),
inline=False)
embedchange.set_thumbnail(url=member.avatar_url)
embedchange.set_footer(text='Text')
await message.channel.send(embed=embedchange)
# Geburtstag
if message.content.startswith('!birthday') and message.author.permissions_in(message.channel).send_tts_messages:
args = message.content.split(' ')
if len(args) >= 2:
teammsg4 = ' '.join(args[1:])
await message.channel.purge(limit=1, check=is_not_pinned)
embedbday = discord.Embed(title='Geburtstag',
description='Jemand feiert heute seinen Geburtstag! Gratuliere ihm!',
color=0x00ffdd)
embedbday.add_field(name='Informationen', value='**{}**'.format(teammsg4),
inline=False)
embedbday.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/birthday.png')
embedbday.set_footer(text='Text')
await message.channel.send(embed=embedbday)
# Clearcommand
if message.content.startswith('!clear'):
if message.author.permissions_in(message.channel).manage_messages:
args = message.content.split(' ')
if len(args) == 2:
if args[1].isdigit():
count = int(args[1]) + 1
deleted = await message.channel.purge(limit=count, check=is_not_pinned)
embed4 = discord.Embed(title='Nachrichten gelöscht!',
description='Gelöschte Nachrichten (Angepinnte ausgeschlossen)',
color=0xff0000)
embed4.add_field(name='Anzahl gelöschter Nachrichten', value='{}'.format(len(deleted) - 1))
embed4.set_footer(text='Text')
await message.channel.send(embed=embed4)
await asyncio.sleep(3)
await message.channel.purge(limit=1, check=is_not_pinned)
else:
await message.channel.send('Bitte gib eine gültige Zahl ein!')
else:
await message.channel.send('Bitte gib eine gültige Zahl ein!')
else:
await message.channel.send('Du hast keine Berechtigung dazu!')
# Orakel
if message.content.startswith('!oracle'):
args = message.content.split(' ')
if len(args) >= 2:
frage = ' '.join(args[1:])
embed5 = discord.Embed(title='Deine Frage an das Orakel',
description='Die Antwort auf deine Frage (Ist vielleicht etwas schwammig aber besser als nix ._.)',
color=0xff0000)
if message.content.endswith('?'):
embed5.add_field(name='Frage', value='**{}**'.format(frage))
else:
embed5.add_field(name='Frage', value='**{}**'.format(frage) + '?')
embed5.add_field(name='Meine Antwort', value='{}'.format(random.choice(antworten)))
embed5.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/support.png')
embed5.set_footer(text='Text')
await message.channel.send(embed=embed5)
else:
await message.channel.send("Bitte gib eine Frage an!")
# YouTube-Link
if message.content.startswith('!youtube'):
embedyoutube = discord.Embed(title='YouTube Kanal',
description='Link zum YouTube Kanal',
color=0xff0000)
embedyoutube.add_field(name='Link', value='Link')
embedyoutube.set_footer(text=Text')
await message.channel.send(embed=embedyoutube)
# Ban-System
if message.content.startswith('!ban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
if len(args) >= 2:
banreason = ' '.join(args[2:])
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embed7 = discord.Embed(title='Benutzer gebannt',
description='Ein Benutzer wurde gebannt',
color=0xff0000)
embed7.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
embed7.add_field(name='Grund', value='{}'.format(banreason))
embed7.set_footer(text='Text')
await message.channel.send(embed=embed7)
embedbandm = discord.Embed(title='Du wurdest gebannt!',
description='Du wurdest vom Discord gebannt!',
color=0xff0000)
embedbandm.add_field(name='Grund', value='{}'.format(banreason))
embedbandm.set_footer(text='Text')
try:
if not member.bot:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embedbandm)
except discord.errors.Forbidden:
print('Es konnte keine Bannachricht an {0} gesendet werden.'.format(member.name))
if member.bot:
print('Der User ist ein Bot.')
await member.ban()
else:
await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
else:
await message.channel.send('Bitte gib einen Namen an!')
if message.content.startswith('!unban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
unbanreason = ' '.join(args[2:])
if len(args) >= 2:
user: User = discord.utils.find(lambda m: args[1] in m.user.name, await message.guild.bans()).user
if user:
await message.guild.unban(user)
embed8 = discord.Embed(title='Benutzer entbannt',
description='Ein Benutzer wurde entbannt',
color=0x04ff00)
embed8.add_field(name='Name des Benutzers', value='**{}**'.format(user.name))
embed8.add_field(name='Grund', value='{}'.format(unbanreason))
embed8.set_footer(text='Game-Forum Discord Bot')
await message.channel.send(embed=embed8)
embedunbandm = discord.Embed(title='Du wurdest entbannt!',
description='Du wurdest vom Discord entbannt!',
color=0x04ff00)
embedunbandm.add_field(name='Grund', value='{}'.format(unbanreason))
embedunbandm.set_footer(text='Du kannst dem Discord nun wieder beitreten!')
try:
if not user.bot:
if not user.dm_channel:
await user.create_dm()
await user.dm_channel.send(embed=embedunbandm)
except discord.errors.Forbidden:
print('Es konnte keine Unbannachricht an {0} gesendet werden.'.format(member.name))
if user.bot:
print('Der User ist ein Bot.')
else:
await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
else:
await message.channel.send('Bitte gib einen Namen an!')
#News-Command
if message.content.startswith('!news') and message.author.permissions_in(message.channel).send_tts_messages:
args = message.content.split(' ')
if len(args) >= 3:
titel = '{}'.format(args[1])
news = ' ' .join(args[2:])
embednews = discord.Embed(title='Eine neue News ist erschienen!',
description='',
color=0x04ff00)
embednews.add_field(name='{}'.format(titel), value='{}'.format(news),
inline=False)
embednews.set_footer(text="Text")
await message.channel.purge(limit=1, check=is_not_pinned)
await message.channel.send(embed = embednews)
if message.content.startswith('!kick') and message.author.guild_permissions.kick_members:
args = message.content.split(' ')
kickreason = ' '.join(args[2:])
if len(args) >= 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embed9 = discord.Embed(title='Benutzer gekickt',
description='Ein Benutzer wurde gekickt',
color=0xfffb00)
embed9.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
embed9.add_field(name='Grund', value='{}'.format(kickreason))
embed9.set_footer(text='Game-Forum Discord Bot')
embedkickdm = discord.Embed(title='Du wurdest gekickt!',
description='Du wurdest vom Discord gekickt!',
color=0xfffb00)
embedkickdm.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
embedkickdm.add_field(name='Grund', value='{}'.format(kickreason))
embedkickdm.set_footer(text='Du kannst dem Discord weiterhin beitreten!')
await message.channel.send(embed=embed9)
try:
if not member.bot:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embedkickdm)
except discord.errors.Forbidden:
print('Es konnte keine Kicknachricht an {0} gesendet werden.'.format(member.name))
if member.bot:
print('Der user ist ein Bot.')
await member.kick()
else:
await message.channel.send(f'Kein User mit dem Namen {args[1]} gefunden.')
else:
await message.channel.send('Bitte gib einen Namen an!')
if message.content.startswith('!warn') and message.author.guild_permissions.manage_nicknames:
args = message.content.split(' ')
warnreason = ' '.join(args[2:])
if len(args) >= 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embedwarn = discord.Embed(title='Benutzer verwarnt',
description='Ein Benutzer wurde verwarnt',
color=0xfffb00)
embedwarn.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
embedwarn.add_field(name='Grund', value='{}'.format(warnreason))
embedwarn.set_footer(text='Game-Forum Discord Bot')
embedwarndm = discord.Embed(title='Du wurdest verwarnt',
description='Du wurdest am Discord verwarnt!',
color=0xfffb00)
embedwarndm.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
embedwarndm.add_field(name='Grund', value='{}'.format(warnreason))
embedwarndm.set_footer(text='Du kannst dem Discord weiterhin beitreten!')
await message.channel.send(embed=embedwarn)
try:
if not member.bot:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embedwarndm)
except discord.errors.Forbidden:
print('Es konnte keine Warnnachricht an {0} gesendet werden.'.format(member.name))
if member.bot:
print('Der User ist ein Bot.')
else:
await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
else:
await message.channel.send('Bitte gib einen Namen an!')
@client.event # Beitritt des Servers
async def on_member_join(member): # Willkommennachricht und Rollenvergabe für User
mitgliedrolle = discord.utils.get(member.guild.roles, name='User')
botrolle = discord.utils.get(member.guild.roles, name='BOT')
willkommenschannel_id = # Channel ID
willkommenschannel = client.get_channel(willkommenschannel_id)
await willkommenschannel.send('Hey **{}**, willkommen auf dem Server!'.format(member.mention))
embed = discord.Embed(title='Willkommen {} auf dem Game-Forun.net Discord Server! 👍 😀'.format(member.name),
description='Wir heißen dich herzlich Willkommen',
color=0x04ff00)
embed.set_thumbnail(url=member.avatar_url)
await willkommenschannel.send(embed=embed)
if not member.bot:
await member.add_roles(mitgliedrolle)
embed = discord.Embed(title='Hey **{}**, willkommen auf dem Discord Server!'.format(member.name), description='Wir heißen dich herzlich willkommen und wünsche dir eine angenehme Zeit auf dem Server.', color=0x04ff00)
try:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embed)
except discord.errors.Forbidden:
print('Ich konnte keine persönliche Willkommennachricht an **{}** senden'.format(member.name))
if member.bot:
await member.add_roles(botrolle)
client.run('Bot Token')
53.07277
229
0.551196
793f63c0920df0d21bc494c2459c68d0f82d1bb4
1,101
py
Python
tests/test_read_and_write_state.py
Sage-Bionetworks/SynapseBucketMover
9f9607e6646543832a1d708dd5747dc58a2ead97
[
"Apache-2.0"
]
1
2018-09-19T18:05:25.000Z
2018-09-19T18:05:25.000Z
tests/test_read_and_write_state.py
Sage-Bionetworks/SynapseBucketMover
9f9607e6646543832a1d708dd5747dc58a2ead97
[
"Apache-2.0"
]
1
2018-10-08T16:48:31.000Z
2018-10-08T17:12:16.000Z
tests/test_read_and_write_state.py
Sage-Bionetworks/SynapseBucketMover
9f9607e6646543832a1d708dd5747dc58a2ead97
[
"Apache-2.0"
]
1
2018-10-08T15:47:23.000Z
2018-10-08T15:47:23.000Z
'''
Created on Aug 30, 2018
@author: bhoff
'''
import unittest
import tempfile
import os
import SynapseBucketMover
from nose.tools import assert_raises, assert_equal, assert_is_none, assert_is_not_none, assert_in, assert_false, assert_true
class Test(unittest.TestCase):
def setUp(self):
self.dir = tempfile.TemporaryDirectory()
def tearDown(self):
if self.dir is not None:
os.remove(os.path.join(self.dir.name, "state.txt"))
def testPersistence(self):
state=SynapseBucketMover.readState(self.dir.name)
assert_equal(0, state['filesProcessedCount'])
assert_equal([], state['treePageMarker'])
state['filesProcessedCount']=100
state['treePageMarker']=[{'parentId':'syn123','nextPageToken':'abc'},{'parentId':'syn456','nextPageToken':'def'}]
SynapseBucketMover.writeState(self.dir.name, state)
readState = SynapseBucketMover.readState(self.dir.name)
assert_equal(state, readState)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testPersistence']
unittest.main()
import asyncio
import logging
import sys
from discord_host import create_bot
sys.path.append("..\\lib")
import msvcrt
import os
import traceback
from mgylabs.db.database import run_migrations
from mgylabs.db.paths import DB_URL, SCRIPT_DIR
from mgylabs.services.telemetry_service import TelemetryReporter
from mgylabs.utils.version import VERSION
from core.controllers.ipc_controller import IPCController
os.chdir(os.path.dirname(os.path.abspath(__file__)))
log = logging.getLogger(__name__)
def instance_already_running():
if VERSION.is_canary():
lock_name = "mkbot_can.lock"
else:
lock_name = "mkbot.lock"
fd = os.open(f"{os.getenv('TEMP')}\\{lock_name}", os.O_WRONLY | os.O_CREAT)
try:
msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
already_running = False
except IOError:
already_running = True
return already_running
async def dry_run():
errorlevel = await create_bot(True)
return errorlevel
def main():
if instance_already_running():
print("MKBotCore is already running.")
sys.exit(0)
run_migrations(SCRIPT_DIR, DB_URL)
if "--dry-run" in sys.argv:
errorlevel = asyncio.run(dry_run())
if errorlevel == 0:
print("Test Passed")
else:
print("Test Failed")
sys.exit(errorlevel)
if "--port" in sys.argv:
try:
loc = sys.argv.index("--port")
PORT = int(sys.argv[loc + 1])
except Exception:
PORT = 8979
else:
PORT = 8979
ipc_controller = IPCController(PORT)
ipc_controller.run()
if __name__ == "__main__":
error = 0
try:
TelemetryReporter.start()
main()
except SystemExit as e:
error = e.code
except Exception as e:
TelemetryReporter.Exception(e)
traceback.print_exc()
error = 1
finally:
TelemetryReporter.terminate()
sys.exit(error)
22.033708
79
0.643039
793f648ec29ee752316e0fd3aa1702566bec1c78
13,756
py
Python
soft_sort/sinkhorn.py
deepneuralmachine/google-research
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
[
"Apache-2.0"
]
23,901
2018-10-04T19:48:53.000Z
2022-03-31T21:27:42.000Z
soft_sort/sinkhorn.py
deepneuralmachine/google-research
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
[
"Apache-2.0"
]
891
2018-11-10T06:16:13.000Z
2022-03-31T10:42:34.000Z
soft_sort/sinkhorn.py
deepneuralmachine/google-research
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
[
"Apache-2.0"
]
6,047
2018-10-12T06:31:02.000Z
2022-03-31T13:59:28.000Z
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Sinkhorn implementation for 1D Optimal Transport.
Sinkhorn algorithm was introduced in 1967 by R. Sinkhorn in the article
"Diagonal equivalence to matrices with prescribed row and column sums." in
The American Mathematical Monthly. It is an iterative algorithm that turns an
input matrix (here the kernel matrix corresponding to transportation costs) into
a matrix with prescribed (a, b) (row, colums) sum marginals by multiplying it on
the left an right by two diagonal matrices.
"""
from typing import Tuple
import gin
import tensorflow.compat.v2 as tf
def center(cost, f, g):
if f.shape.rank == 2:
return cost - f[:, :, tf.newaxis] - g[:, tf.newaxis, :]
elif f.shape.rank == 3:
return cost[:, :, :, tf.newaxis] - (
f[:, :, tf.newaxis, :] + g[:, tf.newaxis, :, :])
def softmin(cost, f, g, eps, axis):
return -eps * tf.reduce_logsumexp(-center(cost, f, g) / eps, axis=axis)
def error(cost, f, g, eps, b):
b_target = tf.math.reduce_sum(transport(cost, f, g, eps), axis=1)
return tf.reduce_max((tf.abs(b_target - b) / b)[:])
def transport(cost, f, g, eps):
return tf.math.exp(-center(cost, f, g) / eps)
def cost_fn(x, y,
power):
"""A transport cost in the form |x-y|^p and its derivative."""
# Check if data is 1D.
if x.shape.rank == 2 and y.shape.rank == 2:
# If that is the case, it is convenient to use pairwise difference matrix.
xy_difference = x[:, :, tf.newaxis] - y[:, tf.newaxis, :]
if power == 1.0:
cost = tf.math.abs(xy_difference)
derivative = tf.math.sign(xy_difference)
elif power == 2.0:
cost = xy_difference**2.0
derivative = 2.0 * xy_difference
else:
abs_diff = tf.math.abs(xy_difference)
cost = abs_diff**power
derivative = power * tf.math.sign(xy_difference) * abs_diff**(power - 1.0)
return cost, derivative
# Otherwise data is high dimensional, in form [batch,n,d]. L2 distance used.
elif x.shape.rank == 3 and y.shape.rank == 3:
x2 = tf.reduce_sum(x**2, axis=2)
y2 = tf.reduce_sum(y**2, axis=2)
cost = (x2[:, :, tf.newaxis] + y2[:, tf.newaxis, :] -
tf.matmul(x, y, transpose_b=True))**(power / 2)
derivative = None
return cost, derivative
@gin.configurable
def sinkhorn_iterations(x,
y,
a,
b,
power = 2.0,
epsilon = 1e-3,
epsilon_0 = 1e-1,
epsilon_decay = 0.95,
threshold = 1e-2,
inner_num_iter = 5,
max_iterations = 2000):
"""Runs the Sinkhorn's algorithm from (x, a) to (y, b).
Args:
x: Tensor<float>[batch, n, d]: the input point clouds.
y: Tensor<float>[batch, m, d]: the target point clouds.
a: Tensor<float>[batch, n, q]: weights of each input point across batch. Note
that q possible variants can be considered (for parallelism).
Sums along axis 1 must match that of b to converge.
b: Tensor<float>[batch, m, q]: weights of each input point across batch. As
with a, q possible variants of weights can be considered.
power: (float) the power of the distance for the cost function.
epsilon: (float) the level of entropic regularization wanted.
epsilon_0: (float) the initial level of entropic regularization.
epsilon_decay: (float) a multiplicative factor applied at each iteration
until reaching the epsilon value.
threshold: (float) the relative threshold on the Sinkhorn error to stop the
Sinkhorn iterations.
inner_num_iter: (int32) the Sinkhorn error is not recomputed at each
iteration but every inner_num_iter instead to avoid computational overhead.
max_iterations: (int32) the maximum number of Sinkhorn iterations.
Returns:
A 5-tuple containing: the values of the conjugate variables f and g, the
final value of the entropic parameter epsilon, the cost matrix and the number
of iterations.
"""
max_outer_iterations = max_iterations // inner_num_iter
loga = tf.math.log(a)
logb = tf.math.log(b)
cost, d_cost = cost_fn(x, y, power)
def body_fn(f, g, eps, num_iter):
for _ in range(inner_num_iter):
g = eps * logb + softmin(cost, f, g, eps, axis=1) + g
f = eps * loga + softmin(cost, f, g, eps, axis=2) + f
eps = tf.math.maximum(eps * epsilon_decay, epsilon)
return [f, g, eps, num_iter + inner_num_iter]
def cond_fn(f, g, eps, num_iter):
return tf.math.reduce_all([
tf.math.less(num_iter, max_iterations),
tf.math.reduce_any([
tf.math.greater(eps, epsilon),
tf.math.greater(error(cost, f, g, eps, b), threshold)
])
])
f, g, eps, iterations = tf.while_loop(
cond_fn,
body_fn, [
tf.zeros_like(loga),
tf.zeros_like(logb),
tf.cast(epsilon_0, dtype=x.dtype),
tf.constant(0, dtype=tf.int32)
],
parallel_iterations=1,
maximum_iterations=max_outer_iterations + 1)
return f, g, eps, cost, d_cost, iterations
def transport_implicit_gradients(derivative_cost,
transport_matrix, eps, b, d_p):
"""Application of the transpose of the Jacobians dP/dx and dP/db.
This is applied to a perturbation of the size of the transport matrix.
Required to back-propagate through Sinkhorn's output.
Args:
derivative_cost: the derivative of the cost function.
transport_matrix: the obtained transport matrix tensor.
eps: the value of the entropic regualarization parameter.
b: the target weights.
d_p: the perturbation of the transport matrix.
Returns:
A list of two tensor that correspond to the application of the transpose
of dP/dx and dP/db on dP.
"""
batch_size = tf.shape(b)[0]
m = tf.shape(b)[1]
invmargin1 = tf.math.reciprocal(tf.reduce_sum(transport_matrix, axis=2))
m1 = invmargin1[:, 1:, tf.newaxis] * transport_matrix[:, 1:, :]
m1 = tf.concat([tf.zeros([tf.shape(m1)[0], 1, tf.shape(m1)[2]]), m1], axis=1)
invmargin2 = tf.math.reciprocal(tf.reduce_sum(transport_matrix, axis=1))
m2 = invmargin2[:, :, tf.newaxis] * tf.transpose(transport_matrix, [0, 2, 1])
eye_m = tf.eye(m, batch_shape=[batch_size])
schur = eye_m - tf.linalg.matmul(m2, m1)
def jac_b_p_transpose(d_p):
"""Transposed of the jacobian of the transport w.r.t the target weights."""
d_p_p = d_p * transport_matrix
u_f = tf.reduce_sum(d_p_p, axis=2) / eps
u_g = tf.reduce_sum(d_p_p, axis=1) / eps
m1_tranpose_u_f = tf.linalg.matvec(m1, u_f, transpose_a=True)
to_invert = tf.concat(
[m1_tranpose_u_f[:, :, tf.newaxis], u_g[:, :, tf.newaxis]], axis=2)
inverses = tf.linalg.solve(tf.transpose(schur, [0, 2, 1]), to_invert)
inv_m1_tranpose_u_f, inv_u_g = inverses[:, :, 0], inverses[:, :, 1]
jac_2 = -inv_m1_tranpose_u_f + inv_u_g
return eps * jac_2 / b
def jac_x_p_transpose(d_p):
"""Transposed of the jacobian of the transport w.r.t the inputs."""
d_p_p = d_p * transport_matrix
c_x = -tf.reduce_sum(derivative_cost * d_p_p, axis=2) / eps
u_f = tf.math.reduce_sum(d_p_p, axis=2) / eps
u_g = tf.math.reduce_sum(d_p_p, axis=1) / eps
m1_tranpose_u_f = tf.linalg.matvec(m1, u_f, transpose_a=True)
to_invert = tf.concat(
[m1_tranpose_u_f[:, :, tf.newaxis], u_g[:, :, tf.newaxis]], axis=2)
inverses = tf.linalg.solve(tf.transpose(schur, [0, 2, 1]), to_invert)
inv_m1_tranpose_u_f, inv_u_g = inverses[:, :, 0], inverses[:, :, 1]
jac_1 = u_f + tf.linalg.matvec(
m2, inv_m1_tranpose_u_f - inv_u_g, transpose_a=True)
jac_2 = -inv_m1_tranpose_u_f + inv_u_g
jac_1 = jac_1 * tf.reduce_sum(m1 * derivative_cost, axis=2)
jac_2 = tf.linalg.matvec(
tf.transpose(m2, [0, 2, 1]) * derivative_cost, jac_2)
return c_x + jac_1 + jac_2
return [jac_x_p_transpose(d_p), jac_b_p_transpose(d_p)]
def autodiff_sinkhorn(x, y, a, b,
**kwargs):
"""A Sinkhorn function that returns the transportation matrix.
This function back-propagates through the computational graph defined by the
Sinkhorn iterations.
Args:
x: [N, n, d] the input batch of points clouds
y: [N, m, d] the target batch points clouds.
a: [N, n, q] q probability weight vectors for the input point cloud. The sum
of all elements of b along axis 1 must match that of a.
b: [N, m, q] q probability weight vectors for the target point cloud. The sum
of all elements of b along axis 1 must match that of a.
**kwargs: additional parameters passed to the sinkhorn algorithm. See
sinkhorn_iterations for more details.
Returns:
A tf.Tensor representing the optimal transport matrix and the regularized OT
cost.
"""
f, g, eps, cost, _, _ = sinkhorn_iterations(x, y, a, b, **kwargs)
return transport(cost, f, g, eps)
def implicit_sinkhorn(x, y, a, b,
**kwargs):
"""A Sinkhorn function using the implicit function theorem.
That is to say differentiating optimality confiditions to recover Jacobians.
Args:
x: the input batch of 1D points clouds
y: the target batch 1D points clouds.
a: the intput weight of each point in the input point cloud. The sum of all
elements of b must match that of a to converge.
b: the target weight of each point in the target point cloud. The sum of all
elements of b must match that of a to converge.
**kwargs: additional parameters passed to the sinkhorn algorithm. See
sinkhorn_iterations for more details.
Returns:
A tf.Tensor representing the optimal transport matrix.
"""
@tf.custom_gradient
def _aux(x, b):
"""Auxiliary closure to compute custom gradient over x and b."""
x = tf.stop_gradient(x)
b = tf.stop_gradient(b)
f, g, eps, cost, d_cost, _ = sinkhorn_iterations(x, y, a, b, **kwargs)
# This centering is crucial to ensure Jacobian is invertible.
# This centering is also assumed in the computation of the
# transpose-Jacobians themselves.
to_remove = f[:, 0]
f = f - to_remove[:, tf.newaxis]
g = g + to_remove[:, tf.newaxis]
forward = transport(cost, f, g, eps)
def grad(d_p):
return transport_implicit_gradients(d_cost, forward, eps, b, d_p)
return forward, grad
return _aux(x, b)
@gin.configurable
def sinkhorn(x,
y,
a,
b,
implicit = True,
**kwargs):
"""A Sinkhorn function that returns the transportation matrix.
This function back-propagates through the computational graph defined by the
Sinkhorn iterations.
Args:
x: the input batch of points clouds
y: the target batch points clouds.
a: the intput weight of each point in the input point cloud. The sum of all
elements of b must match that of a to converge.
b: the target weight of each point in the target point cloud. The sum of all
elements of b must match that of a to converge.
implicit: whether to run the autodiff version of the backprop or the implicit
computation of the gradient. The implicit version is more efficient in
terms of both speed and memory, but might be less stable numerically. It
requires high-accuracy in the computation of the optimal transport itself.
**kwargs: additional parameters passed to the sinkhorn algorithm. See
sinkhorn_iterations for more details.
Returns:
A tf.Tensor representing the optimal transport matrix.
"""
if implicit:
if x.shape.rank == 2:
return implicit_sinkhorn(x, y, a, b, **kwargs)
else:
raise ValueError('`Implicit` not yet implemented for multivariate data')
return autodiff_sinkhorn(x, y, a, b, **kwargs)
def sinkhorn_divergence(x,
y,
a,
b,
only_x_varies = False,
**kwargs):
"""A simple implementation of the Sinkhorn divergence.
This function back-propagates through the computational graph defined by the
Sinkhorn iterations.
Args:
x: [N,n,d] the input batch of multivariate (dimension d) points clouds
y: [N,m,d] the input batch of multivariate (dimension d) points clouds
a: [N,n] probability weights per batch
b: [N,n] probability weights per batch
only_x_varies: <bool> if only x varies, that flag should be set to True,
in order to avoid computing the divergence between y and itself.
**kwargs: additional parameters passed to the sinkhorn algorithm. See
sinkhorn_iterations for more details.
Returns:
A tf.Tensor representing the optimal transport matrix.
"""
f_xy, g_xy = sinkhorn_iterations(x, y, a, b, **kwargs)[:2]
f_xx, g_xx = sinkhorn_iterations(x, x, a, a, **kwargs)[:2]
if only_x_varies:
return tf.reduce_sum((f_xy - 0.5 * f_xx - 0.5 * g_xx) * a +
g_xy * b, axis=1)
else:
f_yy, g_yy = sinkhorn_iterations(y, y, b, b, **kwargs)[:2]
return (tf.reduce_sum((f_xy - 0.5 * f_xx - 0.5 * g_xx) * a, axis=1) +
tf.reduce_sum((g_xy - 0.5 * f_yy - 0.5 * g_yy) * b, axis=1))
38.640449
80
0.659858
793f658a971badf281adaa1a799ebc15d9327324
9,395
py
Python
ebirdtaiwan/dash_apps/TaipeiCompetition.py
even311379/EbirdTaiwan2020
2c1aa4d7346b5ade909d45f7c245fa4988394124
[
"MIT"
]
null
null
null
ebirdtaiwan/dash_apps/TaipeiCompetition.py
even311379/EbirdTaiwan2020
2c1aa4d7346b5ade909d45f7c245fa4988394124
[
"MIT"
]
null
null
null
ebirdtaiwan/dash_apps/TaipeiCompetition.py
even311379/EbirdTaiwan2020
2c1aa4d7346b5ade909d45f7c245fa4988394124
[
"MIT"
]
null
null
null
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import random
import json
import pandas as pd
import numpy as np
import datetime
from fall.models import SignupData, Survey, SurveyObs
import plotly.express as px
import re
import eb_passwords
from collections import Counter
DEMO_MODE = True
app = DjangoDash(
'ThreeTeams',
add_bootstrap_links=True,
) # replaces dash.Dash
# prevent setup complex map twice
def empty_map():
fig = go.Figure(go.Scattermapbox(lat=['38.91427',],lon=['-77.02827',]))
fig.update_layout(
mapbox=dict(
center=dict(lat=23.973793,lon=120.979703),
zoom=8,
style='white-bg')
)
return fig
def draw_area_map():
with open('../helper_files/TaiwanCounties_simple.geojson') as f:
geoj = json.load(f)
data = pd.DataFrame()
NorthTaiwan_geo = []
for f in geoj['features']:
if f['properties']['COUNTYNAME'] in ['新北市', '臺北市']:
NorthTaiwan_geo.append(f)
geoj['features'] = NorthTaiwan_geo
RN = []
for k in range(len(geoj['features'])):
temp = geoj['features'][k]['properties']['COUNTYNAME']+geoj['features'][k]['properties']['TOWNNAME']
geoj['features'][k]['id'] = temp
RN.append(temp)
# and insert id to df
data['Name'] = RN
'''
prepare the map data, the team color with most checklist in each town
'''
if datetime.date.today() < datetime.date(2020, 10, 1):
for t in ['彩鷸隊', '家燕隊', '大冠鷲隊']:
data[t] = np.random.randint(5, 40, len(data))
else:
temp_town = []
for t in ['彩鷸隊', '家燕隊', '大冠鷲隊']:
temp_town.append(Survey.objects.filter(team=t, is_valid=True).values_list('county',flat=True))
if not temp_town[0] and not temp_town[1] and not temp_town[2]:
return empty_map()
for t in ['彩鷸隊', '家燕隊', '大冠鷲隊']:
towns = Survey.objects.filter(team=t, is_valid=True).values_list('county',flat=True)
county_counts = Counter(towns)
nc = [0] * len(RN)
for k in county_counts:
nc[RN.index(k)] = county_counts[k]
data[t] = nc
winner = data[['彩鷸隊', '家燕隊', '大冠鷲隊']].idxmax(axis=1).tolist()
# handles when the score are all the same
BL = (data['彩鷸隊']==data['家燕隊']) & (data['家燕隊']==data['大冠鷲隊']) & (data['彩鷸隊']==data['大冠鷲隊'])
for i, b in enumerate(BL):
if b:
winner[i] = '平手'
data['winner'] = winner
# data['winner'] = [random.choice(['A','B','C','E']) for i in range(len(data))]
# t = [random.choice(['123','456','789','555']) for i in range(len(data))]
area_map = px.choropleth_mapbox(data, geojson=geoj, color="winner",
locations="Name",center={"lat": 24.9839, "lon":121.65},
mapbox_style="carto-positron", zoom=10, hover_data=['彩鷸隊', '家燕隊', '大冠鷲隊'],
color_discrete_map={'彩鷸隊':'#2E92D3', '家燕隊':'#EF8018', '大冠鷲隊':'#FFF101','平手':'rgba(255,255,255,0.3)'},
)
area_map.update_traces(
hovertemplate='''
<b>%{location}</b><br>
上傳清單數<br><br>
彩鷸隊: %{customdata[0]}<br>
家燕隊: %{customdata[1]}<br>
大冠鷲隊: %{customdata[2]}<extra></extra>
''',
hoverlabel=dict(font=dict(size=16)),
# showlegend=False,
marker=dict(line=dict(width=1,color='#000')),
)
area_map.update_layout(
mapbox = dict(
accesstoken=eb_passwords.map_box_api_key,
),
margin={"r":0,"t":0,"l":0,"b":0},
legend=dict(
title='上傳清單數比較',
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
bgcolor='rgba(0,0,0,0)'),
# this is a severe bug, dragmode = False should just remove drag, but its not working for me...
)
return area_map
dashboard_content = html.Div(dbc.Row([
dbc.Col([
html.Div([
html.Div(html.Img(src='/static/img/fall/farmbird.png', className='px-3'),className='team_card_col'),
html.Div([
html.Div([html.Div('隊員人數:'), html.Div('',className='ml-auto', id='team1_n_people')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳清單數:'),html.Div('',className='ml-auto', id='team1_n_list')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥種數:'),html.Div('',className='ml-auto', id='team1_n_species')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥隻數:'),html.Div('',className='ml-auto', id='team1_n_count')], className='d-flex w-75'),
], className='team_card_col')
],className='single_team_card'),
html.Div([
html.Div(html.Img(src='/static/img/fall/citybird.png', className='px-3'),className='team_card_col'),
html.Div([
html.Div([html.Div('隊員人數:'), html.Div('',className='ml-auto', id='team2_n_people')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳清單數:'),html.Div('',className='ml-auto', id='team2_n_list')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥種數:'),html.Div('',className='ml-auto', id='team2_n_species')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥隻數:'),html.Div('',className='ml-auto', id='team2_n_count')], className='d-flex w-75'),
], className='team_card_col')
],className='single_team_card'),
html.Div([
html.Div(html.Img(src='/static/img/fall/forestbird.png', className='px-3'),className='team_card_col'),
html.Div([
html.Div([html.Div('隊員人數:'), html.Div('',className='ml-auto', id='team3_n_people')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳清單數:'),html.Div('',className='ml-auto', id='team3_n_list')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥種數:'),html.Div('',className='ml-auto', id='team3_n_species')], className='d-flex w-75 pb-2'),
html.Div([html.Div('總上傳鳥隻數:'),html.Div('',className='ml-auto', id='team3_n_count')], className='d-flex w-75'),
], className='team_card_col')
],className='single_team_card'),
], md=4),
dbc.Col(
dcc.Graph(figure = empty_map(),id='area_map', className='prgression_map', config=dict(displayModeBar=False)),
className=''
, md=8)
]))
app.layout = html.Div([
html.Div(dashboard_content,className='dashboard_container'),
dcc.Location(id='url'),
html.Div('',id='empty',style={'display':'none'})
]
)
app.clientside_callback(
"""
function(path) {
console.log(path)
return path+',' + String(window.innerWidth) + ',' + String(window.innerHeight);
}
""",
Output('empty', 'children'),
[Input('url', 'pathname')]
)
@app.callback(
[Output('team1_n_people', 'children'),
Output('team1_n_list', 'children'),
Output('team1_n_species', 'children'),
Output('team1_n_count', 'children'),
Output('team2_n_people', 'children'),
Output('team2_n_list', 'children'),
Output('team2_n_species', 'children'),
Output('team2_n_count', 'children'),
Output('team3_n_people', 'children'),
Output('team3_n_list', 'children'),
Output('team3_n_species', 'children'),
Output('team3_n_count', 'children'),
Output('area_map','figure')],
[Input('empty','children'),],
)
def reload_refresh(helper_string):
t1np = len(SignupData.objects.filter(team='彩鷸隊'))
t2np = len(SignupData.objects.filter(team='家燕隊'))
t3np = len(SignupData.objects.filter(team='大冠鷲隊'))
if datetime.date.today() < datetime.date(2020,10,1):
t1nl = 63
t1ns = 43
t1nc = 1204
t2nl = 53
t2ns = 51
t2nc = 1652
t3nl = 70
t3ns = 38
t3nc = 1301
else:
t1nl = len(Survey.objects.filter(team='彩鷸隊', is_valid=True))
t1_rns = SurveyObs.objects.filter(survey__team = '彩鷸隊', survey__is_valid=True).values_list('species_name', flat=True)
t1ns = len(set([re.sub(r' ?\(.*?\)','',s) for s in t1_rns]))
t1nc = sum(SurveyObs.objects.filter(survey__team = '彩鷸隊', survey__is_valid=True).values_list('amount', flat=True))
t2nl = len(Survey.objects.filter(team='家燕隊', is_valid=True))
t2_rns = SurveyObs.objects.filter(survey__team = '家燕隊', survey__is_valid=True).values_list('species_name', flat=True)
t2ns = len(set([re.sub(r' ?\(.*?\)','',s) for s in t2_rns]))
t2nc = sum(SurveyObs.objects.filter(survey__team = '家燕隊', survey__is_valid=True).values_list('amount', flat=True))
t3nl = len(Survey.objects.filter(team='大冠鷲隊', is_valid=True))
t3_rns = SurveyObs.objects.filter(survey__team = '大冠鷲隊', survey__is_valid=True).values_list('species_name', flat=True)
t3ns = len(set([re.sub(r' ?\(.*?\)','',s) for s in t3_rns]))
t3nc = sum(SurveyObs.objects.filter(survey__team = '大冠鷲隊', survey__is_valid=True).values_list('amount', flat=True))
return t1np, t1nl, t1ns, t1nc, t2np, t2nl, t2ns, t2nc, t3np, t3nl, t3ns, t3nc, draw_area_map()
39.64135
149
0.589143
793f65a7de5440917b3f9e1d1480c8aebb966844
1,091
py
Python
supervisor/resolution/validate.py
peddamat/home-assistant-supervisor-test
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
[
"Apache-2.0"
]
1
2021-09-22T00:15:17.000Z
2021-09-22T00:15:17.000Z
supervisor/resolution/validate.py
peddamat/home-assistant-supervisor-test
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
[
"Apache-2.0"
]
100
2021-07-22T06:14:22.000Z
2022-03-31T06:16:16.000Z
supervisor/resolution/validate.py
peddamat/home-assistant-supervisor-test
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
[
"Apache-2.0"
]
2
2021-09-22T00:13:58.000Z
2021-09-22T15:06:27.000Z
"""Validate resolution configuration schema."""
from pathlib import Path
from typing import List
import voluptuous as vol
from ..const import ATTR_CHECKS, ATTR_ENABLED
def get_valid_modules(folder) -> List[str]:
"""Validate check name."""
module_files = Path(__file__).parent.joinpath(folder)
if not module_files.exists():
raise vol.Invalid(f"Module folder '{folder}' not found!")
return [
module.stem
for module in module_files.glob("*.py")
if module.name not in ("base.py", "__init__.py")
]
SCHEMA_CHECK_CONFIG = vol.Schema(
{
vol.Required(ATTR_ENABLED, default=True): bool,
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_CHECKS_CONFIG = vol.Schema(
{
vol.Required(check, default=SCHEMA_CHECK_CONFIG({})): SCHEMA_CHECK_CONFIG
for check in get_valid_modules("checks")
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_RESOLUTION_CONFIG = vol.Schema(
{
vol.Required(
ATTR_CHECKS, default=SCHEMA_CHECKS_CONFIG({})
): SCHEMA_CHECKS_CONFIG,
},
extra=vol.REMOVE_EXTRA,
)
# 8347
# (?i)(?s)<a[^>]+?href="?(?<url>[^"]+)"?>(?<innerHtml>.+?)</a\s*>
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"is<a"*5000+"@1 _SLQ_2"
import re2 as re
from time import perf_counter
regex = """(?i)(?s)<a[^>]+?href="?(?<url>[^"]+)"?>(?<innerHtml>.+?)</a\s*>"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "is<a" * i * 10000 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
30.263158
77
0.556522
793f66d0f2bbc6ea08da45e55aaf36870e67a256
1,185
py
Python
otcextensions/sdk/dws/v1/flavor.py
zsoltn/python-otcextensions
4c0fa22f095ebd5f9636ae72acbae5048096822c
[
"Apache-2.0"
]
null
null
null
otcextensions/sdk/dws/v1/flavor.py
zsoltn/python-otcextensions
4c0fa22f095ebd5f9636ae72acbae5048096822c
[
"Apache-2.0"
]
null
null
null
otcextensions/sdk/dws/v1/flavor.py
zsoltn/python-otcextensions
4c0fa22f095ebd5f9636ae72acbae5048096822c
[
"Apache-2.0"
]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
class FlavorSpec(resource.Resource):
#: type of the value
mtype = resource.Body('type')
#: type of the value
mvalue = resource.Body('value')
#: unit of the value
munit = resource.Body('unit')
def __str__(self):
return self.mtype
class Flavor(resource.Resource):
base_path = '/node_types'
resources_key = 'node_types'
allow_list = True
# Properties
#: Spec name. *Type: str*
spec_name = resource.Body('spec_name')
#: detail. *Type: str*
detail = resource.Body('detail', type=list, list_type=FlavorSpec)
31.184211
75
0.709705
793f66dce80ceff06f76e9cda0246ed1d93b4b27
9,385
py
Python
pyTD/market/__init__.py
kevmartian/pyTD
ea8f374a0c9eddf5a46233f5946e68a67c181009
[
"MIT"
]
16
2018-10-25T06:03:56.000Z
2021-06-14T03:53:01.000Z
pyTD/market/__init__.py
td-ameritrade/pyTD
28099664c8a3b6b7e60f62f5e5c120f01e3530af
[
"MIT"
]
5
2018-11-11T19:34:25.000Z
2021-01-16T03:39:45.000Z
pyTD/market/__init__.py
td-ameritrade/pyTD
28099664c8a3b6b7e60f62f5e5c120f01e3530af
[
"MIT"
]
14
2019-03-12T04:16:40.000Z
2021-04-15T20:26:04.000Z
# MIT License
# Copyright (c) 2018 Addison Lynch
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pyTD.instruments.base import Instruments
from pyTD.market.hours import MarketHours
from pyTD.market.quotes import Quotes
from pyTD.market.movers import Movers
from pyTD.market.options import Options
from pyTD.market.price_history import PriceHistory
def get_fundamentals(*args, **kwargs):
"""
Retrieve fundamental data for a diven symbol or CUSIP ID
Parameters
----------
symbol: str
A CUSIP ID, symbol, regular expression, or snippet (depends on the
value of the "projection" variable)
output_format: str, default "pandas", optional
Desired output format. "pandas" or "json"
"""
kwargs.update({"projection": "fundamental"})
return Instruments(*args, **kwargs).execute()
def get_quotes(*args, **kwargs):
"""
Function for retrieving quotes from the Get Quotes endpoint.
Parameters
----------
symbols : str, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing up to 100 stock symbols.
output_format: str, default 'pandas', optional
Desired output format (json or DataFrame)
kwargs: additional request parameters (see _TDBase class)
"""
return Quotes(*args, **kwargs).execute()
def get_market_hours(*args, **kwargs):
"""
Function to retrieve market hours for a given market from the Market
Hours endpoint
Parameters
----------
market: str, default EQUITY, optional
The market to retrieve operating hours for
date : string or DateTime object, (defaults to today's date)
Operating date, timestamp. Parses many different kind of date
representations (e.g., 'JAN-01-2015', '1/1/15', 'Jan, 1, 1980')
output_format: str, default 'pandas', optional
Desired output format (json or DataFrame)
kwargs: additional request parameters (see _TDBase class)
"""
return MarketHours(*args, **kwargs).execute()
def get_movers(*args, **kwargs):
"""
Function for retrieving market moveers from the Movers endpoint
Parameters
----------
index: str
The index symbol to get movers from
direction: str, default up, optional
Return up or down movers
change: str, default percent, optional
Return movers by percent change or value change
output_format: str, default 'pandas', optional
Desired output format (json or DataFrame)
kwargs: additional request parameters (see _TDBase class)
"""
return Movers(*args, **kwargs).execute()
def get_option_chains(*args, **kwargs):
"""
Function to retrieve option chains for a given symbol from the Option
Chains endpoint
Parameters
----------
contractType: str, default ALL, optional
Desired contract type (CALL, PUT, ALL)
strikeCount: int, optional
Number of strikes to return above and below the at-the-money price
includeQuotes: bool, default False, optional
Include quotes for options in the option chain
strategy: str, default None, optional
Passing a value returns a strategy chain (SINGLE or ANALYTICAL)
interval: int, optional
Strike interval for spread strategy chains
strike: float, optional
Filter options that only have a certain strike price
range: str, optional
Returns options for a given range (ITM, OTM, etc.)
fromDate: str or datetime.datetime object, optional
Only return options after this date
toDate: str or datetime.datetime object, optional
Only return options before this date
volatility: float, optional
Volatility to use in calculations (for analytical strategy chains)
underlyingPrice: float, optional
Underlying price to use in calculations (for analytical strategy
chains)
interestRate: float, optional
Interest rate to use in calculations (for analytical strategy
chains)
daysToExpiration: int, optional
Days to expiration to use in calulations (for analytical
strategy chains)
expMonth: str, optional
Expiration month (format JAN, FEB, etc.) to use in calculations
(for analytical strategy chains), default ALL
optionType: str, optional
Type of contracts to return (S: standard, NS: nonstandard,
ALL: all contracts)
output_format: str, optional, default 'pandas'
Desired output format
api: pyTD.api.api object, optional
A pyTD api object. If not passed, API requestor defaults to
pyTD.api.default_api
kwargs: additional request parameters (see _TDBase class)
"""
return Options(*args, **kwargs).execute()
def get_price_history(*args, **kwargs):
"""
Function to retrieve price history for a given symbol over a given period
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Desired symbols for retrieval
periodType: str, default DAY, optional
The type of period to show
period: int, optional
The number of periods to show
frequencyType: str, optional
The type of frequency with which a new candle is formed
frequency: int, optional
The number of frequencyType to includ with each candle
startDate : string or DateTime object, optional
Starting date, timestamp. Parses many different kind of date
representations (e.g., 'JAN-01-2015', '1/1/15', 'Jan, 1, 1980')
endDate : string or DateTime object, optional
Ending date, timestamp. Parses many different kind of date
representations (e.g., 'JAN-01-2015', '1/1/15', 'Jan, 1, 1980')
extended: str or bool, default 'True'/True, optional
True to return extended hours data, False for regular hours only
output_format: str, default 'pandas', optional
Desired output format (json or DataFrame)
"""
return PriceHistory(*args, **kwargs).execute()
# def get_history_intraday(symbols, start, end, interval='1m', extended=True,
# output_format='pandas'):
# """
# Function to retrieve intraday price history for a given symbol
# Parameters
# ----------
# symbols : string, array-like object (list, tuple, Series), or DataFrame
# Desired symbols for retrieval
# startDate : string or DateTime object, optional
# Starting date, timestamp. Parses many different kind of date
# representations (e.g., 'JAN-01-2015', '1/1/15', 'Jan, 1, 1980')
# endDate : string or DateTime object, optional
# Ending date, timestamp. Parses many different kind of date
# representations (e.g., 'JAN-01-2015', '1/1/15', 'Jan, 1, 1980')
# interval: string, default '1m', optional
# Desired interval (1m, 5m, 15m, 30m, 60m)
# needExtendedHoursData: str or bool, default 'True'/True, optional
# True to return extended hours data, False for regular hours only
# output_format: str, default 'pandas', optional
# Desired output format (json or DataFrame)
# """
# result = PriceHistory(symbols, start_date=start, end_date=end,
# extended=extended,
# output_format=output_format).execute()
# if interval == '1m':
# return result
# elif interval == '5m':
# sample = result.index.floor('5T').drop_duplicates()
# return result.reindex(sample, method='ffill')
# elif interval == '15m':
# sample = result.index.floor('15T').drop_duplicates()
# return result.reindex(sample, method='ffill')
# elif interval == '30m':
# sample = result.index.floor('30T').drop_duplicates()
# return result.reindex(sample, method='ffill')
# elif interval == '60m':
# sample = result.index.floor('60T').drop_duplicates()
# return result.reindex(sample, method='ffill')
# else:
# raise ValueError("Interval must be 1m, 5m, 15m, 30m, or 60m.")
# def get_history_daily(symbols, start, end, output_format='pandas'):
# return PriceHistory(symbols, start_date=start, end_date=end,
# frequency_type='daily',
# output_format=output_format).execute()
40.627706
79
0.677571
793f66e0db6132f4f891ae834a6bd6e9ceec3e59
1,666
py
Python
sample.py
Doctorado-ML/mufs
a0f172ac13870043494050ad988a279369043e1b
[
"MIT"
]
3
2021-11-25T12:29:28.000Z
2022-03-09T21:50:09.000Z
sample.py
Doctorado-ML/mfs
a0f172ac13870043494050ad988a279369043e1b
[
"MIT"
]
2
2021-10-05T09:25:12.000Z
2022-03-10T11:50:58.000Z
sample.py
Doctorado-ML/mufs
a0f172ac13870043494050ad988a279369043e1b
[
"MIT"
]
null
null
null
import warnings
import time
from mufs import MUFS
from mufs.Metrics import Metrics
from stree import Stree
import numpy as np
from scipy.io import arff
mufsc = MUFS(discrete=False)
filename = "conn-bench-sonar-mines-rocks.arff"
data, meta = arff.loadarff(filename)
train = np.array([data[i] for i in meta])
X = train.T
X = X[:, :-1].astype("float64")
y = data["clase"]
m, n = X.shape
print("* Differential entropy in X")
for i in range(n):
print(i, Metrics.differential_entropy(X[:, i], k=10))
print("* Information Gain")
print("- Continuous features")
print(Metrics.information_gain_cont(X, y))
for i in range(n):
print(i, Metrics.information_gain_cont(X[:, i], y))
# Classification
warnings.filterwarnings("ignore")
print("CFS")
now = time.time()
cfs_f = mufsc.cfs(X, y).get_results()
time_cfs = time.time() - now
print(cfs_f, "items: ", len(cfs_f), f"time: {time_cfs:.3f} seconds")
print("FCBF")
now = time.time()
fcbf_f = mufsc.fcbf(X, y, 0.07).get_results()
time_fcbf = time.time() - now
print(fcbf_f, "items: ", len(fcbf_f), f"time: {time_fcbf:.3f} seconds")
now = time.time()
print("IWSS")
iwss_f = mufsc.iwss(X, y, 0.5).get_results()
time_iwss = time.time() - now
print(iwss_f, "items: ", len(iwss_f), f"time: {time_iwss:.3f} seconds")
print("X.shape=", X.shape)
clf = Stree(random_state=0)
print("Accuracy whole dataset", clf.fit(X, y).score(X, y))
clf = Stree(random_state=0)
print("Accuracy cfs", clf.fit(X[:, cfs_f], y).score(X[:, cfs_f], y))
clf = Stree(random_state=0)
print("Accuracy fcfb", clf.fit(X[:, fcbf_f], y).score(X[:, fcbf_f], y))
clf = Stree(random_state=0)
print("Accuracy iwss", clf.fit(X[:, iwss_f], y).score(X[:, iwss_f], y))
import botocore
import calendar
import datetime
import durationpy
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from past.builtins import basestring
from urllib.parse import urlparse
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
"""
This is a contributed re-implementation of 'copytree' that
should work with the exact same behavior on multiple platforms.
When `metadata` is False, file metadata such as permissions and modification
times are not copied.
"""
def copy_file(src, dst, item):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s): # pragma: no cover
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
if metadata:
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, metadata, symlinks, ignore)
else:
shutil.copy2(s, d) if metadata else shutil.copy(s, d)
try:
lst = os.listdir(src)
if not os.path.exists(dst):
os.makedirs(dst)
if metadata:
shutil.copystat(src, dst)
except NotADirectoryError: # egg-link files
copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
return
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
copy_file(src, dst, item)
def parse_s3_url(url):
"""
Parses S3 URL.
Returns bucket (domain) and file (full path).
"""
bucket = ''
path = ''
if url:
result = urlparse(url)
bucket = result.netloc
path = result.path.strip('/')
return bucket, path
def human_size(num, suffix='B'):
"""
Convert bytes length to a human-readable version
"""
for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
if abs(num) < 1024.0:
return "{0:3.1f}{1!s}{2!s}".format(num, unit, suffix)
num /= 1024.0
return "{0:.1f}{1!s}{2!s}".format(num, 'Yi', suffix)
def string_to_timestamp(timestring):
"""
Accepts a str, returns an int timestamp.
"""
ts = None
# Uses an extended version of Go's duration string.
try:
delta = durationpy.from_str(timestring);
past = datetime.datetime.utcnow() - delta
ts = calendar.timegm(past.timetuple())
return ts
except Exception as e:
pass
if ts:
return ts
# else:
# print("Unable to parse timestring.")
return 0
##
# `init` related
##
def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*settings.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
matches.append(package_module)
return matches
def detect_flask_apps():
"""
Automatically try to discover Flask apps files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
with io.open(full, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
app = None
# Kind of janky..
if '= Flask(' in line:
app = line.split('= Flask(')[0].strip()
if '=Flask(' in line:
app = line.split('=Flask(')[0].strip()
if not app:
continue
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
app_module = package_module + '.' + app
matches.append(app_module)
return matches
def get_venv_from_python_version():
return 'python{}.{}'.format(*sys.version_info)
def get_runtime_from_python_version():
"""
"""
if sys.version_info[0] < 3:
raise ValueError("Python 2.x is no longer supported.")
else:
if sys.version_info[1] <= 6:
return 'python3.6'
elif sys.version_info[1] <= 7:
return 'python3.7'
elif sys.version_info[1] <= 8:
return 'python3.8'
else:
return "python3.9"
##
# Async Tasks
##
def get_topic_name(lambda_name):
""" Topic name generation """
return '%s-zappa-async' % lambda_name
##
# Event sources / Kappa
##
def get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
"""
Given an event_source dictionary item, a session and a lambda_arn,
hack into Kappa's Gibson, create out an object we can call
to schedule this event, and return the event source.
"""
import kappa.function
import kappa.restapi
import kappa.event_source.base
import kappa.event_source.dynamodb_stream
import kappa.event_source.kinesis
import kappa.event_source.s3
import kappa.event_source.sns
import kappa.event_source.cloudwatch
import kappa.policy
import kappa.role
import kappa.awsclient
class PseudoContext:
def __init__(self):
return
class PseudoFunction:
def __init__(self):
return
# Mostly adapted from kappa - will probably be replaced by kappa support
class SqsEventSource(kappa.event_source.base.EventSource):
def __init__(self, context, config):
super().__init__(context, config)
self._lambda = kappa.awsclient.create_client(
'lambda', context.session)
def _get_uuid(self, function):
uuid = None
response = self._lambda.call(
'list_event_source_mappings',
FunctionName=function.name,
EventSourceArn=self.arn)
LOG.debug(response)
if len(response['EventSourceMappings']) > 0:
uuid = response['EventSourceMappings'][0]['UUID']
return uuid
def add(self, function):
try:
response = self._lambda.call(
'create_event_source_mapping',
FunctionName=function.name,
EventSourceArn=self.arn,
BatchSize=self.batch_size,
Enabled=self.enabled
)
LOG.debug(response)
except Exception:
LOG.exception('Unable to add event source')
def enable(self, function):
self._config['enabled'] = True
try:
response = self._lambda.call(
'update_event_source_mapping',
UUID=self._get_uuid(function),
Enabled=self.enabled
)
LOG.debug(response)
except Exception:
LOG.exception('Unable to enable event source')
def disable(self, function):
self._config['enabled'] = False
try:
response = self._lambda.call(
'update_event_source_mapping',
FunctionName=function.name,
Enabled=self.enabled
)
LOG.debug(response)
except Exception:
LOG.exception('Unable to disable event source')
def update(self, function):
response = None
uuid = self._get_uuid(function)
if uuid:
try:
response = self._lambda.call(
'update_event_source_mapping',
BatchSize=self.batch_size,
Enabled=self.enabled,
FunctionName=function.arn)
LOG.debug(response)
except Exception:
LOG.exception('Unable to update event source')
def remove(self, function):
response = None
uuid = self._get_uuid(function)
if uuid:
response = self._lambda.call(
'delete_event_source_mapping',
UUID=uuid)
LOG.debug(response)
return response
def status(self, function):
response = None
LOG.debug('getting status for event source %s', self.arn)
uuid = self._get_uuid(function)
if uuid:
try:
response = self._lambda.call(
'get_event_source_mapping',
UUID=self._get_uuid(function))
LOG.debug(response)
except botocore.exceptions.ClientError:
LOG.debug('event source %s does not exist', self.arn)
response = None
else:
LOG.debug('No UUID for event source %s', self.arn)
return response
class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
@property
def filters(self):
return self._config.get('filters')
def add_filters(self, function):
try:
subscription = self.exists(function)
if subscription:
response = self._sns.call(
'set_subscription_attributes',
SubscriptionArn=subscription['SubscriptionArn'],
AttributeName='FilterPolicy',
AttributeValue=json.dumps(self.filters)
)
kappa.event_source.sns.LOG.debug(response)
except Exception:
kappa.event_source.sns.LOG.exception('Unable to add filters for SNS topic %s', self.arn)
def add(self, function):
super().add(function)
if self.filters:
self.add_filters(function)
event_source_map = {
'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
'kinesis': kappa.event_source.kinesis.KinesisEventSource,
's3': kappa.event_source.s3.S3EventSource,
'sns': ExtendedSnsEventSource,
'sqs': SqsEventSource,
'events': kappa.event_source.cloudwatch.CloudWatchEventSource
}
arn = event_source['arn']
_, _, svc, _ = arn.split(':', 3)
event_source_func = event_source_map.get(svc, None)
if not event_source_func:
raise ValueError('Unknown event source: {0}'.format(arn))
def autoreturn(self, function_name):
return function_name
event_source_func._make_notification_id = autoreturn
ctx = PseudoContext()
ctx.session = boto_session
funk = PseudoFunction()
funk.name = lambda_arn
# Kappa 0.6.0 requires this nasty hacking,
# hopefully we can remove at least some of this soon.
# Kappa 0.7.0 introduces a whole host over other changes we don't
# really want, so we're stuck here for a little while.
# Related: https://github.com/Miserlou/Zappa/issues/684
# https://github.com/Miserlou/Zappa/issues/688
# https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
if svc == 's3':
split_arn = lambda_arn.split(':')
arn_front = ':'.join(split_arn[:-1])
arn_back = split_arn[-1]
ctx.environment = arn_back
funk.arn = arn_front
funk.name = ':'.join([arn_back, target_function])
else:
funk.arn = lambda_arn
funk._context = ctx
event_source_obj = event_source_func(ctx, event_source)
return event_source_obj, ctx, funk
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
"""
Given an event_source dictionary, create the object and add the event source.
"""
event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
# TODO: Detect changes in config and refine exists algorithm
if not dry:
if not event_source_obj.status(funk):
event_source_obj.add(funk)
return 'successful' if event_source_obj.status(funk) else 'failed'
else:
return 'exists'
return 'dryrun'
def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
"""
Given an event_source dictionary, create the object and remove the event source.
"""
event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
# This is slightly dirty, but necessary for using Kappa this way.
funk.arn = lambda_arn
if not dry:
rule_response = event_source_obj.remove(funk)
return rule_response
else:
return event_source_obj
def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False):
"""
Given an event_source dictionary, create the object and get the event source status.
"""
event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
return event_source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
Returns True is updateable, else False.
"""
import requests
pypi_url = 'https://pypi.org/pypi/Zappa/json'
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()['info']['version']
return this_version != top_version
class InvalidAwsLambdaName(Exception):
"""Exception: proposed AWS Lambda name is invalid"""
pass
def validate_name(name, maxlen=80):
"""Validate name for AWS Lambda function.
name: actual name (without `arn:aws:lambda:...:` prefix and without
`:$LATEST`, alias or version suffix.
maxlen: max allowed length for name without prefix and suffix.
The value 80 was calculated from prefix with longest known region name
and assuming that no alias or version would be longer than `$LATEST`.
Based on AWS Lambda spec
http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html
Return: the name
Raise: InvalidAwsLambdaName, if the name is invalid.
"""
if not isinstance(name, basestring):
msg = "Name must be of type string"
raise InvalidAwsLambdaName(msg)
if len(name) > maxlen:
msg = "Name is longer than {maxlen} characters."
raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
if len(name) == 0:
msg = "Name must not be empty string."
raise InvalidAwsLambdaName(msg)
if not re.match("^[a-zA-Z0-9-_]+$", name):
msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
raise InvalidAwsLambdaName(msg)
return name
def contains_python_files_or_subdirs(folder):
"""
Checks (recursively) if the directory contains .py or .pyc files
"""
for root, dirs, files in os.walk(folder):
if [filename for filename in files if filename.endswith('.py') or filename.endswith('.pyc')]:
return True
for d in dirs:
for _, subdirs, subfiles in os.walk(d):
if [filename for filename in subfiles if filename.endswith('.py') or filename.endswith('.pyc')]:
return True
return False
def conflicts_with_a_neighbouring_module(directory_path):
"""
Checks if a directory lies in the same directory as a .py file with the same name.
"""
parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path))
neighbours = os.listdir(parent_dir_path)
conflicting_neighbour_filename = current_dir_name+'.py'
return conflicting_neighbour_filename in neighbours
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
"""
Takes a dict with keys of type str and returns a new dict with all keys titlecased.
"""
return {k.title(): v for k, v in d.items()}
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
"""
Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
"""
# Bucket names must be at least 3 and no more than 63 characters long.
if (len(name) < 3 or len(name) > 63):
return False
# Bucket names must not contain uppercase characters or underscores.
if (any(x.isupper() for x in name)):
return False
if "_" in name:
return False
# Bucket names must start with a lowercase letter or number.
if not (name[0].islower() or name[0].isdigit()):
return False
# Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.).
for label in name.split("."):
# Each label must start and end with a lowercase letter or a number.
if len(label) < 1:
return False
if not (label[0].islower() or label[0].isdigit()):
return False
if not (label[-1].islower() or label[-1].isdigit()):
return False
# Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
looks_like_IP = True
for label in name.split("."):
if not label.isdigit():
looks_like_IP = False
break
if looks_like_IP:
return False
return True
def merge_headers(event):
"""
Merge the values of headers and multiValueHeaders into a single dict.
Opens up support for multivalue headers via API Gateway and ALB.
See: https://github.com/Miserlou/Zappa/pull/1756
"""
headers = event.get('headers') or {}
multi_headers = (event.get('multiValueHeaders') or {}).copy()
for h in set(headers.keys()):
if h not in multi_headers:
multi_headers[h] = [headers[h]]
for h in multi_headers.keys():
multi_headers[h] = ', '.join(multi_headers[h])
return multi_headers
32.481229
143
0.599611
793f6926ab932618a83e95c40989d4b314413217
528
py
Python
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/musix-32595
cdc9560eb55a71d6b2d8291892fe478b63b30c80
[
"FTL",
"AML",
"RSA-MD"
]
null
null
null
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/musix-32595
cdc9560eb55a71d6b2d8291892fe478b63b30c80
[
"FTL",
"AML",
"RSA-MD"
]
null
null
null
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/musix-32595
cdc9560eb55a71d6b2d8291892fe478b63b30c80
[
"FTL",
"AML",
"RSA-MD"
]
null
null
null
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "musix-32595.botics.co"
site_params = {
"name": "Musix",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
"jsonl": "django.core.serializers.jsonl",
}
_serializers = {}
class BadSerializer:
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.items() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Return an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list, allow_cycles=False):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has it's dependencies serialized first.
If allow_cycles is True, return the best-effort ordering that will respect
most of dependencies but ignore some of them to break the cycles.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all it's dependencies satisfied.
if all(d not in models or d in model_list for d in deps):
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
if allow_cycles:
# If cycles are allowed, add the last skipped model and ignore
# its dependencies. This could be improved by some graph
# analysis to ignore as few dependencies as possible.
model, _ = skipped.pop()
model_list.append(model)
else:
raise RuntimeError(
"Can't resolve dependencies for %s in serialized app list."
% ', '.join(
model._meta.label
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__)
),
)
model_dependencies = skipped
return model_list
35.073171
92
0.657279
793f6f6ef7d0f017038221bb09f6f5e9b134b970
5,815
py
Python
Metrics/tse_plots.py
Ginfung/FSSE
c54510b78dfceec76c74893e8514ed5177b504e5
[
"MIT"
]
3
2018-08-07T13:54:57.000Z
2020-02-24T11:46:05.000Z
Metrics/tse_plots.py
Ginfung/FSSE
c54510b78dfceec76c74893e8514ed5177b504e5
[
"MIT"
]
1
2019-01-15T23:22:19.000Z
2019-01-15T23:22:19.000Z
Metrics/tse_plots.py
Ginfung/FSSE
c54510b78dfceec76c74893e8514ed5177b504e5
[
"MIT"
]
1
2019-01-09T15:50:47.000Z
2019-01-09T15:50:47.000Z
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import pickle
from scipy.stats import ttest_ind
from numpy import median
import scipy
import numpy
import debug
class FixedOrderFormatter(ScalarFormatter):
"""Formats axis ticks using scientific notation with a constant order of
magnitude"""
def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
"""Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
self.orderOfMagnitude = self._order_of_mag
y_titles = ['Generational Distance\n(lower is better)', 'Generated Spread\n(lower is better)',
'Pareto Front Size\n(higher is better)', 'Hypervolume\n(higher is better)']
def bound(x):
q25, q75 = numpy.percentile(x, [25, 75])
iqr = q75 - q25
m = median(x)
return m - 1.5 * iqr, m + 1.5 * iqr
def plot(model, t_i, yround=4, lessIsBetter=True, ax=None):
"""
:param model:
:param t_i: 0-gd, 1-gs, 2-pfs, 3-hv
:return:
"""
plt.setp(ax.spines.values(), linewidth=0.5)
with open('../Experiments/tse_rs/all.stat', 'r') as f:
data = pickle.load(f)
data = data[model]
ground = data['ground'][t_i]
sway = data['sway'][t_i]
moea = data['moea'][t_i]
sanity = data['sanity'][t_i]
if t_i == 2:
sanity = sway # useless
data = [sanity, ground, sway, moea]
if t_i == 2: data = data[1:]
# fig = plt.figure(1, figsize=(3.5, 2.3))
# ax = plt.subplot(12, 4, panelId)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlim([0.1, 1.8])
x_ticks = [0.5, 0.95, 1.4, 1.85]
if t_i == 2:
x_ticks = x_ticks[:-1]
box = ax.boxplot(data, patch_artist=True, widths=0.3, positions=x_ticks, showfliers=False)
miny = min(bound(ground)[0], bound(sway)[0], bound(moea)[0], bound(sanity)[0])
if miny < 0: miny = 0
maxy = max(bound(ground)[1], bound(sway)[1], bound(moea)[1], bound(sanity)[1])
miny *= 0.8
maxy *= 1.2
ax.set_ylim([miny, maxy])
miny = round(miny, yround)
maxy = round(maxy, yround)
y_ticks = [miny,
# miny + (maxy - miny) * 0.44,
miny + (maxy - miny) * 0.45, maxy * 0.90]
y_ticks = [round(i, yround) for i in y_ticks]
ax.set_yticks(y_ticks)
ax.tick_params(labelsize=6)
ax.tick_params(
axis='x', # changes apply to the x-axis
which='major', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
right='off',
labelbottom='off') # labels along the bottom edge are off
red = ['red', '#FFE6E6']
green = ['green', '#76E959']
orange = ['orange', '#FFE5CC']
colors = ['black']
fcolors = ['#B2B2B2']
les = min(len(ground), len(moea))
p = scipy.stats.wilcoxon(ground[:les], moea[:les])[1]
if p < 0.005 and (abs(median(ground) - median(moea)) < median(moea) * 0.1):
colors.append(orange[0])
fcolors.append(orange[1])
elif (lessIsBetter and median(ground) - median(moea) < median(moea) * 0.2) or (
(not lessIsBetter) and median(ground) - median(moea) > -median(moea) * 0.2):
colors.append(green[0])
fcolors.append(green[1])
else:
colors.append(red[0])
fcolors.append(red[1])
les = min(len(sway), len(moea))
p = scipy.stats.wilcoxon(sway[:les], moea[:les])[1]
# pdb.set_trace()
if p < 0.005 and (abs(median(sway) - median(moea)) < median(moea) * 0.1):
colors.append(orange[0])
fcolors.append(orange[1])
elif (lessIsBetter and median(sway) - median(moea) < median(moea) * 0.1) or (
(not lessIsBetter) and median(sway) - median(moea) > -median(moea) * 0.1):
colors.append(green[0])
fcolors.append(green[1])
else:
colors.append(red[0])
fcolors.append(red[1])
colors.append(orange[0])
fcolors.append(orange[1])
if t_i == 2:
colors = colors[1:]
fcolors = fcolors[1:]
for ml, b, col, fcol in zip(box['medians'], box['boxes'], colors, fcolors):
b.set_color(col)
b.set_linewidth(0.5)
b.set(facecolor=fcol)
ml.set_color(col) # median
ml.set_linewidth(0.5)
# ax.yaxis.set_major_formatter(FixedOrderFormatter(-2))
ax.get_yaxis().get_major_formatter().set_useOffset(True)
if model == 'osp':
ax.set_title(y_titles[t_i], style='italic', fontsize=6)
if t_i == 0:
ax.set_ylabel(model, fontsize=6)
if model == 'linux':
ax.tick_params(labelbottom='on')
if t_i == 2:
ax.set_xticks(x_ticks)
ax.set_xticklabels(('GT', 'SWAY', 'MOEA'), fontsize=6, rotation=50)
else:
ax.set_xticks(x_ticks)
ax.set_xticklabels(['RAND', 'GT', 'SWAY', 'MOEA'], fontsize=6, rotation=50)
if __name__ == '__main__':
tPlot, axes = plt.subplots(nrows=12, ncols=4, figsize=(6, 7))
for i, m in enumerate(
['osp', 'osp2', 'ground', 'flight', 'p3a', 'p3b', 'p3c', 'webportal', 'eshop', 'fiasco', 'freebsd',
'linux']):
print(m)
plot(m, 0, 4, True, axes[i][0])
plot(m, 1, 2, True, axes[i][1])
plot(m, 2, 0, False, axes[i][2])
plot(m, 3, 2, False, axes[i][3])
plt.tight_layout()
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.show()
# using python tool. save as XXX.pdf
31.432432
111
0.591402
793f71acf283560e97f7437849f9242ab095b86d
3,570
py
Python
src/add-control-workflow/graph.py
kfnxu/superset-playground
d11271ddb799e74a2a2f5a9e90f1159c46faf3b6
[
"Apache-2.0"
]
1
2019-05-30T21:49:14.000Z
2019-05-30T21:49:14.000Z
src/add-control-workflow/graph.py
kfnxu/superset-playground
d11271ddb799e74a2a2f5a9e90f1159c46faf3b6
[
"Apache-2.0"
]
null
null
null
src/add-control-workflow/graph.py
kfnxu/superset-playground
d11271ddb799e74a2a2f5a9e90f1159c46faf3b6
[
"Apache-2.0"
]
null
null
null
import simplejson as json
from superset import app
##pip install neo4jrestclient
from neo4jrestclient.client import GraphDatabase
config = app.config
class BaseGraph(object):
def __init__(self):
host = config.get("GRAPHDB_HOST") #""
user = config.get("GRAPHDB_USER") #""
pw = config.get("GRAPHDB_PW") #""
self.db = GraphDatabase(host, username=user, password=pw)
def get_search_categories_graph_db(self, in_vis_type="line", in_vis_id="1", in_vis_container_id="1"):
vis_id = str(1)
if int(in_vis_id) > 0:
vis_id = in_vis_id
vis_container_id = str(1)
if int(in_vis_container_id) > 0:
vis_container_id = in_vis_container_id
q = 'match(a:VisContainer) - [r:has] - (p) where a.tid=' + str(vis_id) + ' '
q += 'return p.tid as ID, p.tname as NAME, p.tcat as CATEGORY order by toInteger(p.tid) '
q += 'union MATCH (a:SearchCategory ) WHERE a.tcat in ["measure", "cause", "risk", "location", "age_select", "sex", "unit", "vistype", "viscontainer", "model_group", "year_group"] RETURN a.tid AS ID, a.tname AS NAME, a.tcat as CATEGORY order by toInteger(a.tid), a.tname'
print('get_search_categories_graph_db vis_id, vis_container_id, q', in_vis_id, in_vis_container_id, q)
results = self.db.query(q, returns=(str, unicode, str))
### how to use dataset
#for r in results:
# print("(%s)-[%s]-[%s]" % (r[0], r[1], r[2]))
return results
def get_search_setting_graph_db(self, in_vis_type="line", in_vis_id="1", in_vis_container_id="1"):
print("get_search_setting_graph_db in_vis_type, in_vis_id, in_vis_container_id", in_vis_type, in_vis_id, in_vis_container_id)
vis_id = str(1)
if int(in_vis_id) > 0:
vis_id = in_vis_id
vis_container_id = str(1)
if int(in_vis_container_id) > 0:
vis_container_id = in_vis_container_id
#q = 'match (a:dataResult {tname:"forecasting"}) - [:contains] -> (f:VisSection {tname:"FBD Compare"}) - [:contains] -> (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by f.tname, g.pos union match ( a:dataResult {tname:"forecasting"}) - [:typeOf] -> (b:Visualization {tid:' + str(vis_id) + '}) - [r2:typeOf] -> (c:VisContainer {tid:' + str(vis_container_id) + '} ) - [:contains] -> (d:VisView {tname:"simpleFlowView"}) -[:contains] -> (e:VisControlPanel) -[:contains] -> (f:VisSection) - [:contains] -> (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname,f.tname, g.tname, h.tname order by toInteger(f.pos), toInteger(g.pos)'
q = 'match (a:dataResult {tname:"forecasting"}) - [:contains] -> (f:VisSection {tname:"FBD Compare"}) - [:contains] -> (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by f.tname, g.pos union match (s:SearchCategory {tcat: "charttype", tname:"' + str(in_vis_type) + '"} ) - [r:has] - (f:VisSection) with f match ( a:dataResult {tname:"forecasting"}) - [:typeOf] -> (b:Visualization {tid:' + str(vis_id) + '}) - [r2:typeOf] -> (c:VisContainer {tid:' + str(vis_container_id) + '} ) - [:contains] -> (d:VisView {tname:"simpleFlowView"}) -[:contains] -> (e:VisControlPanel) -[:contains] -> (f) - [:contains] -> (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by g.pos';
results = self.db.query(q, returns=(str, str, str, str))
print("get_search_setting_graph_db results q", q, results)
return results
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import os
import pprint
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario.midokura.midotools import helper
from tempest.scenario.midokura import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
# path should be described in tempest.conf
SCPATH = "/network_scenarios/"
class TestNetworkBasicMultitenants(manager.AdvancedNetworkScenarioTest):
"""
Description:
Overlapping IP in different tenants
Scenario:
VMs with overlapping ip address in different
tenants should not interfare each other
Prerequisites:
- 2 tenants
- 1 network for each tenant
- 1 subnet with same CIDR for each tenant
Steps:
This testing requires that an option
"allow_overlapping_ips = True
" is configured in neutron.conf file
1. launch VMs with overlapping IP
2. make sure they are not interfered
3. curl http://169.254.169.254/latest/meta-data-instance-id
and make sure it correctly identifies the VM
Expected result:
should succeed
"""
def setUp(self):
super(TestNetworkBasicMultitenants, self).setUp()
self.scenarios = self.setup_topology(
os.path.abspath(
'{0}scenario_basic_multitenant.yaml'.format(SCPATH)))
def _route_and_ip_test(self, ssh_client, remote_ip):
LOG.info("Trying to get the list of ips")
try:
net_info = ssh_client.get_ip_list()
LOG.debug(net_info)
pattern = re.compile(
'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
_list = pattern.findall(net_info)
LOG.debug(_list)
self.assertIn(remote_ip, _list)
route_out = ssh_client.exec_command("sudo /sbin/route -n")
self._check_default_gateway(route_out, remote_ip)
LOG.info(route_out)
except Exception as inst:
LOG.info(inst)
raise
def _check_metadata(self, ssh_client, server):
meta_out = ssh_client.exec_command(
"curl http://169.254.169.254/latest/meta-data/instance-id")
meta_instid = meta_out.split('-')[1]
server_instid = server['OS-EXT-SRV-ATTR:instance_name'].split('-')[1]
LOG.debug("metadata instance-id: " + meta_instid)
LOG.debug("server instance-id: " + server_instid)
self.assertTrue(meta_instid == server_instid)
def _check_default_gateway(self, route_out, internal_ip):
try:
rtable = helper.Routetable.build_route_table(route_out)
LOG.debug(rtable)
self.assertTrue(any([r.is_default_route() for r in rtable]))
except Exception as inst:
LOG.info(inst.args)
raise
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_network_basic_multitenant(self):
for creds_and_scenario in self.scenarios:
self._multitenant_test(creds_and_scenario)
LOG.info("test finished, tearing down now ....")
def _multitenant_test(self, creds_and_scenario):
# the access_point server should be the last one in the list
creds = creds_and_scenario['credentials']
self.set_context(creds)
servers_and_keys = creds_and_scenario['servers_and_keys']
ap_details = servers_and_keys[-1]
networks = ap_details['server']['addresses']
hops = [(ap_details['FIP'].floating_ip_address,
ap_details['keypair']['private_key'])]
for element in servers_and_keys[:-1]:
server = element['server']
name = server['addresses'].keys()[0]
LOG.debug("Server dict\n:" + pprint.pformat(server))
if any(i in networks.keys() for i in server['addresses'].keys()):
remote_ip = server['addresses'][name][0]['addr']
privatekey = element['keypair']['private_key']
hops.append((remote_ip, privatekey))
ssh_client = self.setup_tunnel(hops)
self._route_and_ip_test(ssh_client, hops[-1][0])
self._check_metadata(ssh_client, server)
38.277778
78
0.642753
793f722cbe2ad5113256020b067861f53010b5a5
207
py
Python
tests/test_discovery/discovery.py
smok-serwis/bunia
61f032c990f2be8cf9ed5182e847309dd30af91b
[
"MIT"
]
null
null
null
tests/test_discovery/discovery.py
smok-serwis/bunia
61f032c990f2be8cf9ed5182e847309dd30af91b
[
"MIT"
]
2
2016-12-21T10:04:38.000Z
2017-01-10T19:11:00.000Z
tests/test_discovery/discovery.py
smok-serwis/bunia
61f032c990f2be8cf9ed5182e847309dd30af91b
[
"MIT"
]
null
null
null
# coding=UTF-8
from __future__ import absolute_import, division, print_function
from bunia.api import Command
class Cmd1(Command):
NAME = 'cmd1'
class Cmd2(Command):
NAME = 'cmd2'
COMMAND = Cmd1
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewInstancesAssignedForMyApprovalOperations(object):
"""AccessReviewInstancesAssignedForMyApprovalOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2021_03_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
schedule_definition_id, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AccessReviewInstanceListResult"]
"""Get access review instances assigned for my approval.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:param filter: The filter to apply on the operation. Other than standard filters, one custom
filter option is supported : 'assignedToMeToReview()'. When one specified
$filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
returned.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewInstanceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewInstanceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessReviewInstanceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AccessReviewInstanceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances'} # type: ignore
def get_by_id(
self,
schedule_definition_id, # type: str
id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AccessReviewInstance"
"""Get single access review instance assigned for my approval.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:param id: The id of the access review instance.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessReviewInstance, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewInstance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessReviewInstance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
'id': self._serialize.url("id", id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessReviewInstance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}'} # type: ignore
47.713514
156
0.667951
793f76b65644d9095d5683b97171095ae3f2182e
262
py
Python
truvari/annos/bed_anno.py
mlinderm/truvari
e84804b20214775ba625819568e74a1920c99a06
[
"MIT"
]
null
null
null
truvari/annos/bed_anno.py
mlinderm/truvari
e84804b20214775ba625819568e74a1920c99a06
[
"MIT"
]
null
null
null
truvari/annos/bed_anno.py
mlinderm/truvari
e84804b20214775ba625819568e74a1920c99a06
[
"MIT"
]
null
null
null
"""
Annotate over a generic bedfile
Need to build the 'tree maker' first?
Need to make sure and set it so
--multimatch
And also, need to specify that col[3] (bed name)
must be the INFO=;oaiwef
and the header lines "^#" must be the header information
"""
20.153846
56
0.709924
793f7756b5fa6a9793124091250f6b248f6045e0
32,852
py
Python
fatiando/mesher/mesh.py
XuesongDing/fatiando
57a0e0802fde2e53628511d3a7a2964e69bb309a
[
"BSD-3-Clause"
]
179
2015-03-08T08:50:45.000Z
2022-03-20T08:19:05.000Z
fatiando/mesher/mesh.py
XuesongDing/fatiando
57a0e0802fde2e53628511d3a7a2964e69bb309a
[
"BSD-3-Clause"
]
207
2015-01-12T17:04:57.000Z
2021-01-08T23:36:11.000Z
fatiando/mesher/mesh.py
XuesongDing/fatiando
57a0e0802fde2e53628511d3a7a2964e69bb309a
[
"BSD-3-Clause"
]
114
2015-01-29T18:51:22.000Z
2022-03-25T12:35:43.000Z
"""
Meshes (collections) of geometric objects.
Meshes behave like lists/arrays of geometric objects (they are iterables).
"""
from __future__ import division, absolute_import
from future.builtins import range, object, super
import numpy as np
import scipy.special
import scipy.interpolate
import copy as cp
from .. import gridder
from .geometry import Square, Prism, Sphere, Tesseroid
class SquareMesh(object):
"""
A 2D regular mesh of squares.
For all purposes, :class:`~fatiando.mesher.SquareMesh` can be used as a
list of :class:`~fatiando.mesher.Square`. The order of the squares in the
list is: x directions varies first, then y.
Parameters:
* bounds : list = [x1, x2, y1, y2]
Boundaries of the mesh
* shape : tuple = (ny, nx)
Number of squares in the y and x dimension, respectively
* props : dict
Physical properties of each square in the mesh.
Each key should be the name of a physical property. The corresponding
value should be a list with the values of that particular property on
each square of the mesh.
Examples:
>>> mesh = SquareMesh((0, 4, 0, 6), (2, 2))
>>> for s in mesh:
... print s
x1:0 | x2:2 | y1:0 | y2:3
x1:2 | x2:4 | y1:0 | y2:3
x1:0 | x2:2 | y1:3 | y2:6
x1:2 | x2:4 | y1:3 | y2:6
>>> print mesh[1]
x1:2 | x2:4 | y1:0 | y2:3
>>> print mesh[-1]
x1:2 | x2:4 | y1:3 | y2:6
With physical properties::
>>> mesh = SquareMesh((0, 4, 0, 6), (2, 1), {'slowness':[3.4, 8.6]})
>>> for s in mesh:
... print s
x1:0 | x2:4 | y1:0 | y2:3 | slowness:3.4
x1:0 | x2:4 | y1:3 | y2:6 | slowness:8.6
Or::
>>> mesh = SquareMesh((0, 4, 0, 6), (2, 1))
>>> mesh.addprop('slowness', [3.4, 8.6])
>>> for s in mesh:
... print s
x1:0 | x2:4 | y1:0 | y2:3 | slowness:3.4
x1:0 | x2:4 | y1:3 | y2:6 | slowness:8.6
"""
def __init__(self, bounds, shape, props=None):
ny, nx = shape
size = int(nx * ny)
x1, x2, y1, y2 = bounds
dx = (x2 - x1)/nx
dy = (y2 - y1)/ny
self.bounds = bounds
self.shape = tuple(int(i) for i in shape)
self.size = size
self.dims = (dx, dy)
# props has to be None, not {} by default because {} would be permanent
# for all instaces of the class (like a class variable) and changes
# to one instace would lead to changes in another (and a huge mess)
if props is None:
self.props = {}
else:
self.props = props
# The index of the current square in an iteration. Needed when mesh is
# used as an iterator
self.i = 0
# List of masked squares. Will return None if trying to access them
self.mask = []
def __len__(self):
return self.size
def __getitem__(self, index):
# To walk backwards in the list
if index < 0:
index = self.size + index
if index in self.mask:
return None
ny, nx = self.shape
j = index//nx
i = index - j*nx
x1 = self.bounds[0] + self.dims[0] * i
x2 = x1 + self.dims[0]
y1 = self.bounds[2] + self.dims[1] * j
y2 = y1 + self.dims[1]
props = dict([p, self.props[p][index]] for p in self.props)
return Square((x1, x2, y1, y2), props=props)
def __iter__(self):
self.i = 0
return self
def next(self):
if self.i >= self.size:
raise StopIteration
square = self.__getitem__(self.i)
self.i += 1
return square
def addprop(self, prop, values):
"""
Add physical property values to the cells in the mesh.
Different physical properties of the mesh are stored in a dictionary.
Parameters:
* prop : str
Name of the physical property
* values : list or array
The value of this physical property in each square of the mesh.
For the ordering of squares in the mesh see
:class:`~fatiando.mesher.SquareMesh`
"""
self.props[prop] = values
def get_xs(self):
"""
Get a list of the x coordinates of the corners of the cells in the
mesh.
If the mesh has nx cells, get_xs() will return nx + 1 values.
"""
dx, dy = self.dims
x1, x2, y1, y2 = self.bounds
ny, nx = self.shape
xs = np.arange(x1, x2 + dx, dx, 'f')
if len(xs) == nx + 2:
return xs[0:-1]
elif len(xs) == nx:
xs = xs.tolist()
xs.append(x2)
return np.array(xs)
else:
return xs
def get_ys(self):
"""
Get a list of the y coordinates of the corners of the cells in the
mesh.
If the mesh has ny cells, get_ys() will return ny + 1 values.
"""
dx, dy = self.dims
x1, x2, y1, y2 = self.bounds
ny, nx = self.shape
ys = np.arange(y1, y2, dy, 'f')
if len(ys) == ny + 2:
return ys[0:-1]
elif len(ys) == ny:
ys = ys.tolist()
ys.append(y2)
return np.array(ys)
else:
return ys
def copy(self):
""" Return a deep copy of the current instance."""
return cp.deepcopy(self)
class PointGrid(object):
"""
A regular grid of 3D point sources (spheres of unit volume).
Use this as a 1D list of :class:`~fatiando.mesher.Sphere`.
Grid points are ordered like a C matrix, first each row in a column, then
change columns. In this case, the x direction (North-South) are the rows
and y (East-West) are the columns.
Parameters:
* area : list = [x1, x2, y1, y2]
The area where the grid will be spread out
* z : float or 1d-array
The z coordinates of each point in the grid (remember, z is positive
downward).
* shape : tuple = (nx, ny)
The number of points in the x and y directions
* props : dict
Physical properties of each point in the grid.
Each key should be the name of a physical property. The corresponding
value should be a list with the values of that particular property for
each point in the grid.
Examples::
>>> g = PointGrid([0, 10, 2, 6], 200, (2, 3))
>>> g.shape
(2, 3)
>>> g.size
6
>>> g[0].center
array([ 0., 2., 200.])
>>> g[-1].center
array([ 10., 6., 200.])
>>> for p in g:
... p.center
array([ 0., 2., 200.])
array([ 0., 4., 200.])
array([ 0., 6., 200.])
array([ 10., 2., 200.])
array([ 10., 4., 200.])
array([ 10., 6., 200.])
>>> g.x.reshape(g.shape)
array([[ 0., 0., 0.],
[ 10., 10., 10.]])
>>> g.y.reshape(g.shape)
array([[ 2., 4., 6.],
[ 2., 4., 6.]])
>>> g.dx, g.dy
(10.0, 2.0)
"""
def __init__(self, area, z, shape, props=None):
self.area = area
self.shape = shape
if props is None:
self.props = {}
else:
self.props = props
nx, ny = shape
self.size = nx*ny
self.z = np.zeros(self.size) + z
self.radius = scipy.special.cbrt(3. / (4. * np.pi))
self.x, self.y = gridder.regular(area, shape)
# The spacing between points
self.dx, self.dy = gridder.spacing(area, shape)
def __len__(self):
return self.size
def __getitem__(self, index):
if not isinstance(index, int):
raise IndexError('Invalid index type. Should be int.')
if index >= self.size or index < -self.size:
raise IndexError('Grid index out of range.')
# To walk backwards in the list
if index < 0:
index = self.size + index
props = dict([p, self.props[p][index]] for p in self.props)
sphere = Sphere(self.x[index], self.y[index], self.z[index],
self.radius, props=props)
return sphere
def __iter__(self):
self.i = 0
return self
def next(self):
if self.i >= self.size:
raise StopIteration
sphere = self.__getitem__(self.i)
self.i += 1
return sphere
def addprop(self, prop, values):
"""
Add physical property values to the points in the grid.
Different physical properties of the grid are stored in a dictionary.
Parameters:
* prop : str
Name of the physical property.
* values : list or array
Value of this physical property in each point of the grid
"""
self.props[prop] = values
def split(self, shape):
"""
Divide the grid into subgrids.
.. note::
Remember that x is the North-South direction and y is East-West.
Parameters:
* shape : tuple = (nx, ny)
Number of subgrids along the x and y directions, respectively.
Returns:
* subgrids : list
List of :class:`~fatiando.mesher.PointGrid`
Examples::
>>> import numpy as np
>>> z = np.linspace(0, 1100, 12)
>>> g = PointGrid((0, 3, 0, 2), z, (4, 3))
>>> g.addprop('bla', [1, 2, 3,
... 4, 5, 6,
... 7, 8, 9,
... 10, 11, 12])
>>> grids = g.split((2, 3))
>>> for s in grids:
... s.props['bla']
array([1, 4])
array([2, 5])
array([3, 6])
array([ 7, 10])
array([ 8, 11])
array([ 9, 12])
>>> for s in grids:
... s.x
array([ 0., 1.])
array([ 0., 1.])
array([ 0., 1.])
array([ 2., 3.])
array([ 2., 3.])
array([ 2., 3.])
>>> for s in grids:
... s.y
array([ 0., 0.])
array([ 1., 1.])
array([ 2., 2.])
array([ 0., 0.])
array([ 1., 1.])
array([ 2., 2.])
>>> for s in grids:
... s.z
array([ 0., 300.])
array([ 100., 400.])
array([ 200., 500.])
array([ 600., 900.])
array([ 700., 1000.])
array([ 800., 1100.])
"""
nx, ny = shape
totalx, totaly = self.shape
if totalx % nx != 0 or totaly % ny != 0:
raise ValueError(
'Cannot split! nx and ny must be divisible by grid shape')
x1, x2, y1, y2 = self.area
xs = np.linspace(x1, x2, totalx)
ys = np.linspace(y1, y2, totaly)
mx, my = (totalx//nx, totaly//ny)
dx, dy = self.dx*(mx - 1), self.dy*(my - 1)
subs = []
for i, xstart in enumerate(xs[::mx]):
for j, ystart in enumerate(ys[::my]):
area = [xstart, xstart + dx, ystart, ystart + dy]
props = {}
for p in self.props:
pmatrix = np.reshape(self.props[p], self.shape)
props[p] = pmatrix[i*mx:(i + 1)*mx,
j*my:(j + 1)*my].ravel()
zmatrix = np.reshape(self.z, self.shape)
zs = zmatrix[i*mx:(i + 1)*mx,
j*my:(j + 1)*my].ravel()
subs.append(PointGrid(area, zs, (mx, my), props))
return subs
def copy(self):
""" Return a deep copy of the current instance."""
return cp.deepcopy(self)
class PrismRelief(object):
"""
A 3D model of a relief (topography) using prisms.
Use to generate:
* topographic model
* basin model
* Moho model
* etc
PrismRelief can used as list of prisms. It acts as an iteratior (so you
can loop over prisms). It also has a ``__getitem__`` method to access
individual elements in the mesh.
In practice, PrismRelief should be able to be passed to any function that
asks for a list of prisms, like :func:`fatiando.gravmag.prism.gz`.
Parameters:
* ref : float
Reference level. Prisms will have:
* bottom on zref and top on z if z > zref;
* bottom on z and top on zref otherwise.
* dims : tuple = (dy, dx)
Dimensions of the prisms in the y and x directions
* nodes : list of lists = [x, y, z]
Coordinates of the center of the top face of each prism.x, y, and z are
lists with the x, y and z coordinates on a regular grid.
"""
def __init__(self, ref, dims, nodes):
x, y, z = nodes
if len(x) != len(y) != len(z):
raise ValueError(
"nodes has x, y, z coordinate arrays of different lengths")
self.x, self.y, self.z = x, y, z
self.size = len(x)
self.ref = ref
self.dy, self.dx = dims
self.props = {}
# The index of the current prism in an iteration. Needed when mesh is
# used as an iterator
self.i = 0
def __len__(self):
return self.size
def __iter__(self):
self.i = 0
return self
def __getitem__(self, index):
# To walk backwards in the list
if index < 0:
index = self.size + index
xc, yc, zc = self.x[index], self.y[index], self.z[index]
x1 = xc - 0.5 * self.dx
x2 = xc + 0.5 * self.dx
y1 = yc - 0.5 * self.dy
y2 = yc + 0.5 * self.dy
if zc <= self.ref:
z1 = zc
z2 = self.ref
else:
z1 = self.ref
z2 = zc
props = dict([p, self.props[p][index]] for p in self.props)
return Prism(x1, x2, y1, y2, z1, z2, props=props)
def next(self):
if self.i >= self.size:
raise StopIteration
prism = self.__getitem__(self.i)
self.i += 1
return prism
def addprop(self, prop, values):
"""
Add physical property values to the prisms.
.. warning:: If the z value of any point in the relief is below the
reference level, its corresponding prism will have the physical
property value with oposite sign than was assigned to it.
Parameters:
* prop : str
Name of the physical property.
* values : list
List or array with the value of this physical property in each
prism of the relief.
"""
def correct(v, i):
if self.z[i] > self.ref:
return -v
return v
self.props[prop] = [correct(v, i) for i, v in enumerate(values)]
def copy(self):
""" Return a deep copy of the current instance."""
return cp.deepcopy(self)
class PrismMesh(object):
"""
A 3D regular mesh of right rectangular prisms.
Prisms are ordered as follows: first layers (z coordinate),
then EW rows (y) and finaly x coordinate (NS).
.. note:: Remember that the coordinate system is x->North, y->East and
z->Down
Ex: in a mesh with shape ``(3,3,3)`` the 15th element (index 14) has z
index 1 (second layer), y index 1 (second row), and x index 2 (third
element in the column).
:class:`~fatiando.mesher.PrismMesh` can used as list of prisms. It acts
as an iteratior (so you can loop over prisms). It also has a
``__getitem__`` method to access individual elements in the mesh.
In practice, :class:`~fatiando.mesher.PrismMesh` should be able to be
passed to any function that asks for a list of prisms, like
:func:`fatiando.gravmag.prism.gz`.
To make the mesh incorporate a topography, use
:meth:`~fatiando.mesher.PrismMesh.carvetopo`
Parameters:
* bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
Boundaries of the mesh.
* shape : tuple = (nz, ny, nx)
Number of prisms in the x, y, and z directions.
* props : dict
Physical properties of each prism in the mesh.
Each key should be the name of a physical property. The corresponding
value should be a list with the values of that particular property on
each prism of the mesh.
Examples:
>>> from fatiando.mesher import PrismMesh
>>> mesh = PrismMesh((0, 1, 0, 2, 0, 3), (1, 2, 2))
>>> for p in mesh:
... print p
x1:0 | x2:0.5 | y1:0 | y2:1 | z1:0 | z2:3
x1:0.5 | x2:1 | y1:0 | y2:1 | z1:0 | z2:3
x1:0 | x2:0.5 | y1:1 | y2:2 | z1:0 | z2:3
x1:0.5 | x2:1 | y1:1 | y2:2 | z1:0 | z2:3
>>> print mesh[0]
x1:0 | x2:0.5 | y1:0 | y2:1 | z1:0 | z2:3
>>> print mesh[-1]
x1:0.5 | x2:1 | y1:1 | y2:2 | z1:0 | z2:3
One with physical properties::
>>> props = {'density':[2670.0, 1000.0]}
>>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2), props=props)
>>> for p in mesh:
... print p
x1:0 | x2:1 | y1:0 | y2:4 | z1:0 | z2:3 | density:2670
x1:1 | x2:2 | y1:0 | y2:4 | z1:0 | z2:3 | density:1000
or equivalently::
>>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2))
>>> mesh.addprop('density', [200, -1000.0])
>>> for p in mesh:
... print p
x1:0 | x2:1 | y1:0 | y2:4 | z1:0 | z2:3 | density:200
x1:1 | x2:2 | y1:0 | y2:4 | z1:0 | z2:3 | density:-1000
You can use :meth:`~fatiando.mesher.PrismMesh.get_xs` (and similar
methods for y and z) to get the x coordinates of the prisms in the mesh::
>>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2))
>>> print mesh.get_xs()
[ 0. 1. 2.]
>>> print mesh.get_ys()
[ 0. 4.]
>>> print mesh.get_zs()
[ 0. 3.]
The ``shape`` of the mesh must be integer!
>>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2.5))
Traceback (most recent call last):
...
AttributeError: Invalid mesh shape (1, 1, 2.5). shape must be integers
"""
celltype = Prism
def __init__(self, bounds, shape, props=None):
nz, ny, nx = shape
if not isinstance(nx, int) or not isinstance(ny, int) or \
not isinstance(nz, int):
raise AttributeError(
'Invalid mesh shape {}. shape must be integers'.format(
str(shape)))
size = int(nx * ny * nz)
x1, x2, y1, y2, z1, z2 = bounds
dx = (x2 - x1)/nx
dy = (y2 - y1)/ny
dz = (z2 - z1)/nz
self.shape = tuple(int(i) for i in shape)
self.size = size
self.dims = (dx, dy, dz)
self.bounds = bounds
if props is None:
self.props = {}
else:
self.props = props
# The index of the current prism in an iteration. Needed when mesh is
# used as an iterator
self.i = 0
# List of masked prisms. Will return None if trying to access them
self.mask = []
# Wether or not to change heights to z coordinate
self.zdown = True
def __len__(self):
return self.size
def __getitem__(self, index):
if index >= self.size or index < -self.size:
raise IndexError('mesh index out of range')
# To walk backwards in the list
if index < 0:
index = self.size + index
if index in self.mask:
return None
nz, ny, nx = self.shape
k = index//(nx*ny)
j = (index - k*(nx*ny))//nx
i = (index - k*(nx*ny) - j*nx)
x1 = self.bounds[0] + self.dims[0] * i
x2 = x1 + self.dims[0]
y1 = self.bounds[2] + self.dims[1] * j
y2 = y1 + self.dims[1]
z1 = self.bounds[4] + self.dims[2] * k
z2 = z1 + self.dims[2]
props = dict([p, self.props[p][index]] for p in self.props)
return self.celltype(x1, x2, y1, y2, z1, z2, props=props)
def __iter__(self):
self.i = 0
return self
def next(self):
if self.i >= self.size:
raise StopIteration
prism = self.__getitem__(self.i)
self.i += 1
return prism
def addprop(self, prop, values):
"""
Add physical property values to the cells in the mesh.
Different physical properties of the mesh are stored in a dictionary.
Parameters:
* prop : str
Name of the physical property.
* values : list or array
Value of this physical property in each prism of the mesh. For the
ordering of prisms in the mesh see
:class:`~fatiando.mesher.PrismMesh`
"""
self.props[prop] = values
def carvetopo(self, x, y, height, below=False):
"""
Mask (remove) prisms from the mesh that are above the topography.
Accessing the ith prism will return None if it was masked (above the
topography).
Also mask prisms outside of the topography grid provided.
The topography height information does not need to be on a regular
grid, it will be interpolated.
Parameters:
* x, y : lists
x and y coordinates of the grid points
* height : list or array
Array with the height of the topography
* below : boolean
Will mask prisms below the input surface if set to *True*.
"""
nz, ny, nx = self.shape
x1, x2, y1, y2, z1, z2 = self.bounds
dx, dy, dz = self.dims
# The coordinates of the centers of the cells
xc = np.arange(x1, x2, dx) + 0.5 * dx
# Sometimes arange returns more due to rounding
if len(xc) > nx:
xc = xc[:-1]
yc = np.arange(y1, y2, dy) + 0.5 * dy
if len(yc) > ny:
yc = yc[:-1]
zc = np.arange(z1, z2, dz) + 0.5 * dz
if len(zc) > nz:
zc = zc[:-1]
XC, YC = np.meshgrid(xc, yc)
topo = scipy.interpolate.griddata((x, y), height, (XC, YC),
method='cubic').ravel()
if self.zdown:
# -1 if to transform height into z coordinate
topo = -1 * topo
# griddata returns a masked array. If the interpolated point is out of
# of the data range, mask will be True. Use this to remove all cells
# below a masked topo point (ie, one with no height information)
if np.ma.isMA(topo):
topo_mask = topo.mask
else:
topo_mask = [False for i in range(len(topo))]
c = 0
for cellz in zc:
for h, masked in zip(topo, topo_mask):
if below:
if (masked or
(cellz > h and self.zdown) or
(cellz < h and not self.zdown)):
self.mask.append(c)
else:
if (masked or
(cellz < h and self.zdown) or
(cellz > h and not self.zdown)):
self.mask.append(c)
c += 1
def get_xs(self):
"""
Return an array with the x coordinates of the prisms in mesh.
"""
x1, x2, y1, y2, z1, z2 = self.bounds
dx, dy, dz = self.dims
nz, ny, nx = self.shape
xs = np.arange(x1, x2 + dx, dx)
if xs.size > nx + 1:
return xs[:-1]
return xs
def get_ys(self):
"""
Return an array with the y coordinates of the prisms in mesh.
"""
x1, x2, y1, y2, z1, z2 = self.bounds
dx, dy, dz = self.dims
nz, ny, nx = self.shape
ys = np.arange(y1, y2 + dy, dy)
if ys.size > ny + 1:
return ys[:-1]
return ys
def get_zs(self):
"""
Return an array with the z coordinates of the prisms in mesh.
"""
x1, x2, y1, y2, z1, z2 = self.bounds
dx, dy, dz = self.dims
nz, ny, nx = self.shape
zs = np.arange(z1, z2 + dz, dz)
if zs.size > nz + 1:
return zs[:-1]
return zs
def get_layer(self, i):
"""
Return the set of prisms corresponding to the ith layer of the mesh.
Parameters:
* i : int
The index of the layer
Returns:
* prisms : list of :class:`~fatiando.mesher.Prism`
The prisms in the ith layer
Examples::
>>> mesh = PrismMesh((0, 2, 0, 2, 0, 2), (2, 2, 2))
>>> layer = mesh.get_layer(0)
>>> for p in layer:
... print p
x1:0 | x2:1 | y1:0 | y2:1 | z1:0 | z2:1
x1:1 | x2:2 | y1:0 | y2:1 | z1:0 | z2:1
x1:0 | x2:1 | y1:1 | y2:2 | z1:0 | z2:1
x1:1 | x2:2 | y1:1 | y2:2 | z1:0 | z2:1
>>> layer = mesh.get_layer(1)
>>> for p in layer:
... print p
x1:0 | x2:1 | y1:0 | y2:1 | z1:1 | z2:2
x1:1 | x2:2 | y1:0 | y2:1 | z1:1 | z2:2
x1:0 | x2:1 | y1:1 | y2:2 | z1:1 | z2:2
x1:1 | x2:2 | y1:1 | y2:2 | z1:1 | z2:2
"""
nz, ny, nx = self.shape
if i >= nz or i < 0:
raise IndexError('Layer index %d is out of range.' % (i))
start = i * nx * ny
end = (i + 1) * nx * ny
layer = [self.__getitem__(p) for p in range(start, end)]
return layer
def layers(self):
"""
Returns an iterator over the layers of the mesh.
Examples::
>>> mesh = PrismMesh((0, 2, 0, 2, 0, 2), (2, 2, 2))
>>> for layer in mesh.layers():
... for p in layer:
... print p
x1:0 | x2:1 | y1:0 | y2:1 | z1:0 | z2:1
x1:1 | x2:2 | y1:0 | y2:1 | z1:0 | z2:1
x1:0 | x2:1 | y1:1 | y2:2 | z1:0 | z2:1
x1:1 | x2:2 | y1:1 | y2:2 | z1:0 | z2:1
x1:0 | x2:1 | y1:0 | y2:1 | z1:1 | z2:2
x1:1 | x2:2 | y1:0 | y2:1 | z1:1 | z2:2
x1:0 | x2:1 | y1:1 | y2:2 | z1:1 | z2:2
x1:1 | x2:2 | y1:1 | y2:2 | z1:1 | z2:2
"""
nz, ny, nx = self.shape
for i in range(nz):
yield self.get_layer(i)
def dump(self, meshfile, propfile, prop):
r"""
Dump the mesh to a file in the format required by UBC-GIF program
MeshTools3D.
Parameters:
* meshfile : str or file
Output file to save the mesh. Can be a file name or an open file.
* propfile : str or file
Output file to save the physical properties *prop*. Can be a file
name or an open file.
* prop : str
The name of the physical property in the mesh that will be saved to
*propfile*.
.. note:: Uses -10000000 as the dummy value for plotting topography
Examples:
>>> from StringIO import StringIO
>>> meshfile = StringIO()
>>> densfile = StringIO()
>>> mesh = PrismMesh((0, 10, 0, 20, 0, 5), (1, 2, 2))
>>> mesh.addprop('density', [1, 2, 3, 4])
>>> mesh.dump(meshfile, densfile, 'density')
>>> print meshfile.getvalue().strip()
2 2 1
0 0 0
2*10
2*5
1*5
>>> print densfile.getvalue().strip()
1.0000
3.0000
2.0000
4.0000
"""
if prop not in self.props:
raise ValueError("mesh doesn't have a '%s' property." % (prop))
isstr = False
if isinstance(meshfile, str):
isstr = True
meshfile = open(meshfile, 'w')
nz, ny, nx = self.shape
x1, x2, y1, y2, z1, z2 = self.bounds
dx, dy, dz = self.dims
meshfile.writelines([
"%d %d %d\n" % (ny, nx, nz),
"%g %g %g\n" % (y1, x1, -z1),
"%d*%g\n" % (ny, dy),
"%d*%g\n" % (nx, dx),
"%d*%g" % (nz, dz)])
if isstr:
meshfile.close()
values = np.fromiter(self.props[prop], dtype=np.float)
# Replace the masked cells with a dummy value
values[self.mask] = -10000000
reordered = np.ravel(np.reshape(values, self.shape), order='F')
np.savetxt(propfile, reordered, fmt='%.4f')
def copy(self):
""" Return a deep copy of the current instance."""
return cp.deepcopy(self)
class TesseroidMesh(PrismMesh):
"""
A 3D regular mesh of tesseroids.
Tesseroids are ordered as follows: first layers (height coordinate),
then N-S rows and finaly E-W.
Ex: in a mesh with shape ``(3,3,3)`` the 15th element (index 14) has height
index 1 (second layer), y index 1 (second row), and x index 2 (
third element in the column).
This class can used as list of tesseroids. It acts
as an iteratior (so you can loop over tesseroids).
It also has a ``__getitem__``
method to access individual elements in the mesh.
In practice, it should be able to be
passed to any function that asks for a list of tesseroids, like
:func:`fatiando.gravmag.tesseroid.gz`.
To make the mesh incorporate a topography, use
:meth:`~fatiando.mesher.TesseroidMesh.carvetopo`
Parameters:
* bounds : list = [w, e, s, n, top, bottom]
Boundaries of the mesh. ``w, e, s, n`` in degrees, ``top`` and
``bottom`` are heights (positive upward) and in meters.
* shape : tuple = (nr, nlat, nlon)
Number of tesseroids in the radial, latitude, and longitude directions.
* props : dict
Physical properties of each tesseroid in the mesh.
Each key should be the name of a physical property. The corresponding
value should be a list with the values of that particular property on
each tesseroid of the mesh.
Examples:
>>> from fatiando.mesher import TesseroidMesh
>>> mesh = TesseroidMesh((0, 1, 0, 2, 3, 0), (1, 2, 2))
>>> for p in mesh:
... print p
w:0 | e:0.5 | s:0 | n:1 | top:3 | bottom:0
w:0.5 | e:1 | s:0 | n:1 | top:3 | bottom:0
w:0 | e:0.5 | s:1 | n:2 | top:3 | bottom:0
w:0.5 | e:1 | s:1 | n:2 | top:3 | bottom:0
>>> print mesh[0]
w:0 | e:0.5 | s:0 | n:1 | top:3 | bottom:0
>>> print mesh[-1]
w:0.5 | e:1 | s:1 | n:2 | top:3 | bottom:0
One with physical properties::
>>> props = {'density':[2670.0, 1000.0]}
>>> mesh = TesseroidMesh((0, 2, 0, 4, 3, 0), (1, 1, 2), props=props)
>>> for p in mesh:
... print p
w:0 | e:1 | s:0 | n:4 | top:3 | bottom:0 | density:2670
w:1 | e:2 | s:0 | n:4 | top:3 | bottom:0 | density:1000
or equivalently::
>>> mesh = TesseroidMesh((0, 2, 0, 4, 3, 0), (1, 1, 2))
>>> mesh.addprop('density', [200, -1000.0])
>>> for p in mesh:
... print p
w:0 | e:1 | s:0 | n:4 | top:3 | bottom:0 | density:200
w:1 | e:2 | s:0 | n:4 | top:3 | bottom:0 | density:-1000
You can use :meth:`~fatiando.mesher.PrismMesh.get_xs` (and similar
methods for y and z) to get the x coordinates of the tesseroidss in the
mesh::
>>> mesh = TesseroidMesh((0, 2, 0, 4, 3, 0), (1, 1, 2))
>>> print mesh.get_xs()
[ 0. 1. 2.]
>>> print mesh.get_ys()
[ 0. 4.]
>>> print mesh.get_zs()
[ 3. 0.]
You can iterate over the layers of the mesh::
>>> mesh = TesseroidMesh((0, 2, 0, 2, 2, 0), (2, 2, 2))
>>> for layer in mesh.layers():
... for p in layer:
... print p
w:0 | e:1 | s:0 | n:1 | top:2 | bottom:1
w:1 | e:2 | s:0 | n:1 | top:2 | bottom:1
w:0 | e:1 | s:1 | n:2 | top:2 | bottom:1
w:1 | e:2 | s:1 | n:2 | top:2 | bottom:1
w:0 | e:1 | s:0 | n:1 | top:1 | bottom:0
w:1 | e:2 | s:0 | n:1 | top:1 | bottom:0
w:0 | e:1 | s:1 | n:2 | top:1 | bottom:0
w:1 | e:2 | s:1 | n:2 | top:1 | bottom:0
The ``shape`` of the mesh must be integer!
>>> mesh = TesseroidMesh((0, 2, 0, 4, 0, 3), (1, 1, 2.5))
Traceback (most recent call last):
...
AttributeError: Invalid mesh shape (1, 1, 2.5). shape must be integers
"""
celltype = Tesseroid
def __init__(self, bounds, shape, props=None):
super().__init__(bounds, shape, props)
self.zdown = False
self.dump = None
#!/usr/bin/env python
#
# GrovePi Example for using the Grove - CO2 Sensor(http://www.seeedstudio.com/depot/Grove-CO2-Sensor-p-1863.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Connect the CO2 sensor to the RPISER port on the GrovePi
import grove_co2_lib
import time
co2= grove_co2_lib.CO2()
while True:
[ppm,temp]= co2.read()
print("CO2 Conc: %d ppm\t Temp: %d C" %(ppm,temp))
time.sleep(1)
39.869565
139
0.77699
793f7bf40b7b234edb8137e9245bbf40a4537087
774
py
Python
configs/swa/swa_cascade_s50_rfp_mstrain_aug.py
NEUdeep/TileDetection
f453ac868de195a7859b9bf07c813e46eb35d2d0
[
"Apache-2.0"
]
41
2021-03-23T23:43:00.000Z
2022-03-22T12:42:53.000Z
configs/swa/swa_cascade_s50_rfp_mstrain_aug.py
hlcedu/TileDetection
77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c
[
"Apache-2.0"
]
3
2021-09-12T13:04:34.000Z
2022-03-23T07:29:43.000Z
configs/swa/swa_cascade_s50_rfp_mstrain_aug.py
hlcedu/TileDetection
77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c
[
"Apache-2.0"
]
7
2021-03-31T03:21:43.000Z
2021-12-27T08:50:13.000Z
_base_ = ['../tile_round2/cascade_s50_rfp_mstrain.py', '../_base_/swa.py']
only_swa_training = True
# whether to perform swa training
swa_training = True
# load the best pre_trained model as the starting model for swa training
swa_load_from = 'work_dirs/round2/cascade_s50_rfp_mstrain_aug_alldata/latest.pth'
swa_resume_from = None
# swa optimizer
swa_optimizer = dict(_delete_=True, type='Adam', lr=7e-5)
swa_optimizer_config = dict(grad_clip=None)
# swa learning policy
swa_lr_config = dict(
policy='cyclic',
target_ratio=(1, 0.01),
cyclic_times=12,
step_ratio_up=0.0)
swa_total_epochs = 12
# swa checkpoint setting
swa_checkpoint_config = dict(interval=1, filename_tmpl='swa_epoch_{}.pth')
work_dir = 'work_dirs/round2/swa_cascade_s50_rfp_mstrain_aug'
32.25
81
0.776486
793f7bf8a56a3147e060b10df36e3a9bd3ab1265
91
py
Python
setup.py
3rwww1/setuptools_scm_azure_pipelines
24c9db0ce0b3f5bfd5ffe8be9550b97350eefe6b
[
"MIT"
]
null
null
null
setup.py
3rwww1/setuptools_scm_azure_pipelines
24c9db0ce0b3f5bfd5ffe8be9550b97350eefe6b
[
"MIT"
]
null
null
null
setup.py
3rwww1/setuptools_scm_azure_pipelines
24c9db0ce0b3f5bfd5ffe8be9550b97350eefe6b
[
"MIT"
]
null
null
null
#!/usr/bin/env python
import setuptools
if __name__ == '__main__':
setuptools.setup()
15.166667
26
0.703297
793f7cbb82bbc1eb6a8844b9e5e744c7d2816a6d
5,037
py
Python
qa/rpc-tests/prioritise_transaction.py
v-core/v
f87e0fb859aae6aa7de8b3816093bf900fdcce42
[
"MIT"
]
1
2018-01-19T19:29:29.000Z
2018-01-19T19:29:29.000Z
qa/rpc-tests/prioritise_transaction.py
v-core/v
f87e0fb859aae6aa7de8b3816093bf900fdcce42
[
"MIT"
]
null
null
null
qa/rpc-tests/prioritise_transaction.py
v-core/v
f87e0fb859aae6aa7de8b3816093bf900fdcce42
[
"MIT"
]
null
null
null
#!/usr/bin/env python2
# Copyright (c) 2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
COIN = 100000000
class PrioritiseTransactionTest(VCoinTestFramework):
def __init__(self):
self.txouts = gen_return_txouts()
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def run_test(self):
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
for i in xrange(3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
# the priority to ensure its not mined due to priority)
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
print "Assert that prioritised transasction was mined"
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction).
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the high feerate transaction isn't mined.
self.nodes[0].generate(5)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
print "Assert that de-prioritised transaction is still in mempool"
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
txid = self.nodes[0].sendrawtransaction(tx_hex)
# A tx that spends an in-mempool tx has 0 priority, so we can use it to
# test the effect of using prioritise transaction for mempool acceptance
inputs = []
inputs.append({"txid": txid, "vout": 0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
try:
self.nodes[0].sendrawtransaction(tx2_hex)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
assert(tx2_id not in self.nodes[0].getrawmempool())
else:
assert(False)
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000 byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
print "Assert that prioritised free transaction is accepted to mempool"
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
assert(tx2_id in self.nodes[0].getrawmempool())
if __name__ == '__main__':
PrioritiseTransactionTest().main()
import logging
from pyvisdk.exceptions import InvalidArgumentError
# This module is NOT auto-generated
# Inspired by decompiled Java classes from vCenter's internalvim25stubs.jar
# Unless states otherside, the methods and attributes were not used by esxcli,
# and thus not tested
log = logging.getLogger(__name__)
def VimEsxCLIiscsisessionlistSession(vim, *args, **kwargs):
obj = vim.client.factory.create('{urn:vim25}VimEsxCLIiscsisessionlistSession')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'Adapter', 'AuthenticationMethod', 'DataPduInOrder', 'DataSequenceInOrder', 'DefaultTime2Retain', 'DefaultTime2Wait', 'ErrorRecoveryLevel', 'FirstBurstLength', 'ID', 'ISID', 'ImmediateData', 'InitialR2T', 'MaxBurstLength', 'MaxConnections', 'MaxOutstandingR2T', 'TSIH', 'Target', 'TargetPortalGroupTag' ]
for name, arg in zip(required + optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
42.451613
321
0.702888
793f7f5d8aae11ac4517a3c878293c40604e0832
26,465
py
Python
cloudkitty/storage/v2/gnocchi.py
chnyda/cloudkitty
9c65b1d2304b8b963e12ef1918b9b23e180131b7
[
"Apache-2.0"
]
null
null
null
cloudkitty/storage/v2/gnocchi.py
chnyda/cloudkitty
9c65b1d2304b8b963e12ef1918b9b23e180131b7
[
"Apache-2.0"
]
null
null
null
cloudkitty/storage/v2/gnocchi.py
chnyda/cloudkitty
9c65b1d2304b8b963e12ef1918b9b23e180131b7
[
"Apache-2.0"
]
null
null
null
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from collections import deque
from collections import Iterable
import copy
import datetime
import decimal
import time
from gnocchiclient import auth as gauth
from gnocchiclient import client as gclient
from gnocchiclient import exceptions as gexceptions
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
import six
from cloudkitty.storage.v2 import BaseStorage
from cloudkitty import utils as ck_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
gnocchi_storage_opts = [
cfg.StrOpt(
'gnocchi_auth_type',
default='keystone',
choices=['keystone', 'basic'],
help='(v2) Gnocchi auth type (keystone or basic). Keystone '
'credentials can be specified through the "auth_section" parameter',
),
cfg.StrOpt(
'gnocchi_user',
default='',
help='(v2) Gnocchi user (for basic auth only)',
),
cfg.StrOpt(
'gnocchi_endpoint',
default='',
help='(v2) Gnocchi endpoint (for basic auth only)',
),
cfg.StrOpt(
'api_interface',
default='internalURL',
help='(v2) Endpoint URL type (for keystone auth only)',
),
cfg.IntOpt(
'measure_chunk_size',
min=10, max=1000000,
default=500,
help='(v2) Maximum amount of measures to send to gnocchi at once '
'(defaults to 500).',
),
]
CONF.register_opts(gnocchi_storage_opts, 'storage_gnocchi')
ks_loading.register_session_conf_options(CONF, 'storage_gnocchi')
ks_loading.register_auth_conf_options(CONF, 'storage_gnocchi')
RESOURCE_TYPE_NAME_ROOT = 'cloudkitty_metric_'
ARCHIVE_POLICY_NAME = 'cloudkitty_archive_policy'
GROUPBY_NAME_ROOT = 'groupby_attr_'
META_NAME_ROOT = 'meta_attr_'
class GnocchiResource(object):
"""Class representing a gnocchi resource
It provides utils for resource_type/resource creation and identifying.
"""
def __init__(self, name, metric, conn):
"""Resource_type name, metric, gnocchiclient"""
self.name = name
self.resource_type = RESOURCE_TYPE_NAME_ROOT + name
self.unit = metric['vol']['unit']
self.groupby = {
k: v if v else '' for k, v in metric['groupby'].items()}
self.metadata = {
k: v if v else '' for k, v in metric['metadata'].items()}
self._trans_groupby = {
GROUPBY_NAME_ROOT + key: val for key, val in self.groupby.items()
}
self._trans_metadata = {
META_NAME_ROOT + key: val for key, val in self.metadata.items()
}
self._conn = conn
self._resource = None
self.attributes = self.metadata.copy()
self.attributes.update(self.groupby)
self._trans_attributes = self._trans_metadata.copy()
self._trans_attributes.update(self._trans_groupby)
self.needs_update = False
def __getitem__(self, key):
output = self._trans_attributes.get(GROUPBY_NAME_ROOT + key, None)
if output is None:
output = self._trans_attributes.get(META_NAME_ROOT + key, None)
return output
def __eq__(self, other):
if self.resource_type != other.resource_type or \
self['id'] != other['id']:
return False
own_keys = list(self.groupby.keys())
own_keys.sort()
other_keys = list(other.groupby.keys())
other_keys.sort()
if own_keys != other_keys:
return False
for key in own_keys:
if other[key] != self[key]:
return False
return True
@property
def qty(self):
if self._resource:
return self._resource['metrics']['qty']
return None
@property
def cost(self):
if self._resource:
return self._resource['metrics']['cost']
return None
def _get_res_type_dict(self):
attributes = {}
for key in self._trans_groupby.keys():
attributes[key] = {'required': True, 'type': 'string'}
attributes['unit'] = {'required': True, 'type': 'string'}
for key in self._trans_metadata.keys():
attributes[key] = {'required': False, 'type': 'string'}
return {
'name': self.resource_type,
'attributes': attributes,
}
def create_resource_type(self):
"""Allows to create the type corresponding to this resource."""
try:
self._conn.resource_type.get(self.resource_type)
except gexceptions.ResourceTypeNotFound:
res_type = self._get_res_type_dict()
LOG.debug('Creating resource_type {} in gnocchi'.format(
self.resource_type))
self._conn.resource_type.create(res_type)
@staticmethod
def _get_rfc6902_attributes_add_op(new_attributes):
return [{
'op': 'add',
'path': '/attributes/{}'.format(attr),
'value': {
'required': attr.startswith(GROUPBY_NAME_ROOT),
'type': 'string'
}
} for attr in new_attributes]
def update_resource_type(self):
needed_res_type = self._get_res_type_dict()
current_res_type = self._conn.resource_type.get(
needed_res_type['name'])
new_attributes = [attr for attr in needed_res_type['attributes'].keys()
if attr not in current_res_type['attributes'].keys()]
if not new_attributes:
return
LOG.info('Adding {} to resource_type {}'.format(
[attr.replace(GROUPBY_NAME_ROOT, '').replace(META_NAME_ROOT, '')
for attr in new_attributes],
current_res_type['name'].replace(RESOURCE_TYPE_NAME_ROOT, ''),
))
new_attributes_op = self._get_rfc6902_attributes_add_op(new_attributes)
self._conn.resource_type.update(
needed_res_type['name'], new_attributes_op)
def _create_metrics(self):
qty = self._conn.metric.create(
name='qty',
unit=self.unit,
archive_policy_name=ARCHIVE_POLICY_NAME,
)
cost = self._conn.metric.create(
name='cost',
archive_policy_name=ARCHIVE_POLICY_NAME,
)
return qty, cost
def exists_in_gnocchi(self):
"""Check if the resource exists in gnocchi.
Returns true if the resource exists.
"""
query = {
'and': [
{'=': {key: value}}
for key, value in self._trans_groupby.items()
],
}
res = self._conn.resource.search(resource_type=self.resource_type,
query=query)
if len(res) > 1:
LOG.warning(
"Found more than one metric matching groupby. This may not "
"have the behavior you're expecting. You should probably add "
"some items to groupby")
if len(res) > 0:
self._resource = res[0]
return True
return False
def create(self):
"""Creates the resource in gnocchi."""
if self._resource:
return
self.create_resource_type()
qty_metric, cost_metric = self._create_metrics()
resource = self._trans_attributes.copy()
resource['metrics'] = {
'qty': qty_metric['id'],
'cost': cost_metric['id'],
}
resource['id'] = uuidutils.generate_uuid()
resource['unit'] = self.unit
if not self.exists_in_gnocchi():
try:
self._resource = self._conn.resource.create(
self.resource_type, resource)
# Attributes have changed
except gexceptions.BadRequest:
self.update_resource_type()
self._resource = self._conn.resource.create(
self.resource_type, resource)
def update(self, metric):
for key, val in metric['metadata'].items():
self._resource[META_NAME_ROOT + key] = val
self._resource = self._conn.update(
self.resource_type, self._resource['id'], self._resource)
self.needs_update = False
return self._resource
class GnocchiResourceCacher(object):
"""Class allowing to keep created resource in memory to improve perfs.
It keeps the last max_size resources in cache.
"""
def __init__(self, max_size=500):
self._resources = deque(maxlen=max_size)
def __contains__(self, resource):
for r in self._resources:
if r == resource:
for key, val in resource.metadata.items():
if val != r[key]:
r.needs_update = True
return True
return False
def add_resource(self, resource):
"""Add a resource to the cacher.
:param resource: resource to add
:type resource: GnocchiResource
"""
for r in self._resources:
if r == resource:
return
self._resources.append(resource)
def get(self, resource):
"""Returns the resource matching to the parameter.
:param resource: resource to get
:type resource: GnocchiResource
"""
for r in self._resources:
if r == resource:
return r
return None
def get_by_id(self, resource_id):
"""Returns the resource matching the given id.
:param resource_id: ID of the resource to get
:type resource: str
"""
for r in self._resources:
if r['id'] == resource_id:
return r
return None
class GnocchiStorage(BaseStorage):
default_op = ['aggregate', 'sum', ['metric', 'cost', 'sum'], ]
def _check_archive_policy(self):
try:
self._conn.archive_policy.get(ARCHIVE_POLICY_NAME)
except gexceptions.ArchivePolicyNotFound:
definition = [
{'granularity': str(CONF.collect.period) + 's',
'timespan': '{d} days'.format(d=self.get_retention().days)},
]
archive_policy = {
'name': ARCHIVE_POLICY_NAME,
'back_window': 0,
'aggregation_methods': [
'std', 'count', 'min', 'max', 'sum', 'mean'],
'definition': definition,
}
self._conn.archive_policy.create(archive_policy)
def __init__(self, *args, **kwargs):
super(GnocchiStorage, self).__init__(*args, **kwargs)
adapter_options = {'connect_retries': 3}
if CONF.storage_gnocchi.gnocchi_auth_type == 'keystone':
auth_plugin = ks_loading.load_auth_from_conf_options(
CONF,
'storage_gnocchi',
)
adapter_options['interface'] = CONF.storage_gnocchi.api_interface
else:
auth_plugin = gauth.GnocchiBasicPlugin(
user=CONF.storage_gnocchi.gnocchi_user,
endpoint=CONF.storage_gnocchi.gnocchi_endpoint,
)
self._conn = gclient.Client(
'1',
session_options={'auth': auth_plugin},
adapter_options=adapter_options,
)
self._cacher = GnocchiResourceCacher()
def init(self):
self._check_archive_policy()
def _check_resource(self, metric_name, metric):
resource = GnocchiResource(metric_name, metric, self._conn)
if resource in self._cacher:
return self._cacher.get(resource)
resource.create()
self._cacher.add_resource(resource)
return resource
def _push_measures_to_gnocchi(self, measures):
if measures:
try:
self._conn.metric.batch_metrics_measures(measures)
except gexceptions.BadRequest:
LOG.warning(
'An exception occured while trying to push measures to '
'gnocchi. Retrying in 1 second. If this happens again, '
'set measure_chunk_size to a lower value.')
time.sleep(1)
self._conn.metric.batch_metrics_measures(measures)
# Do not use scope_id, as it is deprecated and will be
# removed together with the v1 storage
def push(self, dataframes, scope_id=None):
if not isinstance(dataframes, list):
dataframes = [dataframes]
measures = {}
nb_measures = 0
for dataframe in dataframes:
timestamp = dataframe['period']['begin']
for metric_name, metrics in dataframe['usage'].items():
for metric in metrics:
resource = self._check_resource(metric_name, metric)
if resource.needs_update:
resource.update(metric)
if not resource.qty or not resource.cost:
LOG.warning('Unexpected continue')
continue
# resource.qty is the uuid of the qty metric
if not measures.get(resource.qty):
measures[resource.qty] = []
measures[resource.qty].append({
'timestamp': timestamp,
'value': metric['vol']['qty'],
})
if not measures.get(resource.cost):
measures[resource.cost] = []
measures[resource.cost].append({
'timestamp': timestamp,
'value': metric['rating']['price'],
})
nb_measures += 2
if nb_measures >= CONF.storage_gnocchi.measure_chunk_size:
LOG.debug('Pushing {} measures to gnocchi.'.format(
nb_measures))
self._push_measures_to_gnocchi(measures)
measures = {}
nb_measures = 0
LOG.debug('Pushing {} measures to gnocchi.'.format(nb_measures))
self._push_measures_to_gnocchi(measures)
def _get_ck_resource_types(self):
types = self._conn.resource_type.list()
return [gtype['name'] for gtype in types
if gtype['name'].startswith(RESOURCE_TYPE_NAME_ROOT)]
def _check_res_types(self, res_type=None):
if res_type is None:
output = self._get_ck_resource_types()
elif isinstance(res_type, Iterable):
output = res_type
else:
output = [res_type]
return sorted(output)
@staticmethod
def _check_begin_end(begin, end):
if not begin:
begin = ck_utils.get_month_start()
if not end:
end = ck_utils.get_next_month()
if isinstance(begin, six.text_type):
begin = ck_utils.iso2dt(begin)
if isinstance(begin, int):
begin = ck_utils.ts2dt(begin)
if isinstance(end, six.text_type):
end = ck_utils.iso2dt(end)
if isinstance(end, int):
end = ck_utils.ts2dt(end)
return begin, end
def _get_resource_frame(self,
cost_measure,
qty_measure,
resource):
# Getting price
price = decimal.Decimal(cost_measure[2])
price_dict = {'price': float(price)}
# Getting vol
vol_dict = {
'qty': decimal.Decimal(qty_measure[2]),
'unit': resource.get('unit'),
}
# Formatting
groupby = {
k.replace(GROUPBY_NAME_ROOT, ''): v
for k, v in resource.items() if k.startswith(GROUPBY_NAME_ROOT)
}
metadata = {
k.replace(META_NAME_ROOT, ''): v
for k, v in resource.items() if k.startswith(META_NAME_ROOT)
}
return {
'groupby': groupby,
'metadata': metadata,
'vol': vol_dict,
'rating': price_dict,
}
def _to_cloudkitty(self,
res_type,
resource,
cost_measure,
qty_measure):
start = cost_measure[0]
stop = start + datetime.timedelta(seconds=cost_measure[1])
# Period
period_dict = {
'begin': ck_utils.dt2iso(start),
'end': ck_utils.dt2iso(stop),
}
return {
'usage': {res_type: [
self._get_resource_frame(cost_measure, qty_measure, resource)],
},
'period': period_dict,
}
def _get_resource_info(self, resource_ids, start, stop):
search = {
'and': [
{
'or': [
{
'=': {'id': resource_id},
}
for resource_id in resource_ids
],
},
],
}
resources = []
marker = None
while True:
resource_chunk = self._conn.resource.search(query=search,
details=True,
marker=marker,
sorts=['id:asc'])
if len(resource_chunk) < 1:
break
marker = resource_chunk[-1]['id']
resources += resource_chunk
return {resource['id']: resource for resource in resources}
@staticmethod
def _dataframes_to_list(dataframes):
keys = sorted(dataframes.keys())
return [dataframes[key] for key in keys]
def _get_dataframes(self, measures, resource_info):
dataframes = {}
for measure in measures:
resource_type = measure['group']['type']
resource_id = measure['group']['id']
# Raw metrics do not contain all required attributes
resource = resource_info[resource_id]
dataframe = dataframes.get(measure['cost'][0])
ck_resource_type_name = resource_type.replace(
RESOURCE_TYPE_NAME_ROOT, '')
if dataframe is None:
dataframes[measure['cost'][0]] = self._to_cloudkitty(
ck_resource_type_name,
resource,
measure['cost'],
measure['qty'])
elif dataframe['usage'].get(ck_resource_type_name) is None:
dataframe['usage'][ck_resource_type_name] = [
self._get_resource_frame(
measure['cost'], measure['qty'], resource)]
else:
dataframe['usage'][ck_resource_type_name].append(
self._get_resource_frame(
measure['cost'], measure['qty'], resource))
return self._dataframes_to_list(dataframes)
@staticmethod
def _create_filters(filters, group_filters):
output = {}
if filters:
for k, v in filters.items():
output[META_NAME_ROOT + k] = v
if group_filters:
for k, v in group_filters.items():
output[GROUPBY_NAME_ROOT + k] = v
return output
def _raw_metrics_to_distinct_measures(self,
raw_cost_metrics,
raw_qty_metrics):
output = []
for cost, qty in zip(raw_cost_metrics, raw_qty_metrics):
output += [{
'cost': cost_measure,
'qty': qty['measures']['measures']['aggregated'][idx],
'group': cost['group'],
} for idx, cost_measure in enumerate(
cost['measures']['measures']['aggregated'])
]
# Sorting by timestamp, metric type and resource ID
output.sort(key=lambda x: (
x['cost'][0], x['group']['type'], x['group']['id']))
return output
def retrieve(self, begin=None, end=None,
filters=None, group_filters=None,
metric_types=None,
offset=0, limit=100, paginate=True):
begin, end = self._check_begin_end(begin, end)
metric_types = self._check_res_types(metric_types)
# Getting a list of active gnocchi resources with measures
filters = self._create_filters(filters, group_filters)
# FIXME(lukapeschke): We query all resource types in order to get the
# total amount of dataframes, but this could be done in a better way;
# ie. by not doing addtional queries once the limit is reached
raw_cost_metrics = []
raw_qty_metrics = []
for mtype in metric_types:
cost_metrics, qty_metrics = self._single_resource_type_aggregates(
begin, end, mtype, ['type', 'id'], filters, fetch_qty=True)
raw_cost_metrics += cost_metrics
raw_qty_metrics += qty_metrics
measures = self._raw_metrics_to_distinct_measures(
raw_cost_metrics, raw_qty_metrics)
result = {'total': len(measures)}
if paginate:
measures = measures[offset:limit]
if len(measures) < 1:
return {
'total': 0,
'dataframes': [],
}
resource_ids = [measure['group']['id'] for measure in measures]
resource_info = self._get_resource_info(resource_ids, begin, end)
result['dataframes'] = self._get_dataframes(measures, resource_info)
return result
def _single_resource_type_aggregates(self,
start, stop,
metric_type,
groupby,
filters,
fetch_qty=False):
search = {
'and': [
{'=': {'type': metric_type}}
]
}
search['and'] += [{'=': {k: v}} for k, v in filters.items()]
cost_op = self.default_op
output = (
self._conn.aggregates.fetch(
cost_op,
search=search,
groupby=groupby,
resource_type=metric_type,
start=start, stop=stop),
None
)
if fetch_qty:
qty_op = copy.deepcopy(self.default_op)
qty_op[2][1] = 'qty'
output = (
output[0],
self._conn.aggregates.fetch(
qty_op,
search=search,
groupby=groupby,
resource_type=metric_type,
start=start, stop=stop)
)
return output
@staticmethod
def _ungroup_type(rated_resources):
output = []
for rated_resource in rated_resources:
rated_resource['group'].pop('type', None)
new_item = True
for elem in output:
if rated_resource['group'] == elem['group']:
elem['measures']['measures']['aggregated'] \
+= rated_resource['measures']['measures']['aggregated']
new_item = False
break
if new_item:
output.append(rated_resource)
return output
def total(self, groupby=None,
begin=None, end=None,
metric_types=None,
filters=None, group_filters=None):
begin, end = self._check_begin_end(begin, end)
if groupby is None:
groupby = []
request_groupby = [
GROUPBY_NAME_ROOT + elem for elem in groupby if elem != 'type']
# We need to have a least one attribute on which to group
request_groupby.append('type')
# NOTE(lukapeschke): For now, it isn't possible to group aggregates
# from different resource types using custom attributes, so we need
# to do one request per resource type.
rated_resources = []
metric_types = self._check_res_types(metric_types)
filters = self._create_filters(filters, group_filters)
for mtype in metric_types:
resources, _ = self._single_resource_type_aggregates(
begin, end, mtype, request_groupby, filters)
for resource in resources:
# If we have found something
if len(resource['measures']['measures']['aggregated']):
rated_resources.append(resource)
# NOTE(lukapeschke): We undo what has been done previously (grouping
# per type). This is not performant. Should be fixed as soon as
# previous note is supported in gnocchi
if 'type' not in groupby:
rated_resources = self._ungroup_type(rated_resources)
output = []
for rated_resource in rated_resources:
rate = sum(measure[2] for measure in
rated_resource['measures']['measures']['aggregated'])
output_elem = {
'begin': begin,
'end': end,
'rate': rate,
}
for group in groupby:
output_elem[group] = rated_resource['group'].get(
GROUPBY_NAME_ROOT + group, '')
# If we want to group per type
if 'type' in groupby:
output_elem['type'] = rated_resource['group'].get(
'type', '').replace(RESOURCE_TYPE_NAME_ROOT, '') or ''
output.append(output_elem)
return output
35.146082
79
0.552919
793f80cdf965f44100be6d0fdb92b9721bd1c029
3,665
py
Python
utils/PUMA_helpers.py
NYCPlanning/db-equitable-development-tool
b24d83dc4092489995cabcdcb611642c1c8ee3b2
[
"MIT"
]
1
2021-12-30T21:03:56.000Z
2021-12-30T21:03:56.000Z
utils/PUMA_helpers.py
NYCPlanning/db-equitable-development-tool
b24d83dc4092489995cabcdcb611642c1c8ee3b2
[
"MIT"
]
209
2021-10-20T19:03:04.000Z
2022-03-31T21:02:37.000Z
utils/PUMA_helpers.py
NYCPlanning/db-equitable-development-tool
b24d83dc4092489995cabcdcb611642c1c8ee3b2
[
"MIT"
]
null
null
null
import geopandas as gp
from shapely.geometry import Point
import pandas as pd
from numpy import nan
import requests
from utils.geocode import from_eviction_address
geocode_functions = {"from_eviction_address": from_eviction_address}
borough_code_mapper = {
"037": "BX",
"038": "MN",
"039": "SI",
"040": "BK",
"041": "QN",
}
borough_name_mapper = {
"Bronx": "BX",
"Brooklyn": "BK",
"Manhattan": "MN",
"Queens": "QN",
"Staten Island": "SI",
}
census_races = ["anh", "bnh", "hsp", "onh", "wnh"]
dcp_pop_races = ["anh", "bnh", "hsp", "wnh"]
def puma_to_borough(record):
borough_code = record.puma[:3]
borough = borough_code_mapper.get(borough_code, None)
return borough
NYC_PUMAS_url = "https://services5.arcgis.com/GfwWNkhOj9bNBqoJ/arcgis/rest/services/NYC_Public_Use_Microdata_Areas_PUMAs_2010/FeatureServer/0/query?where=1=1&outFields=*&outSR=4326&f=pgeojson"
def clean_PUMAs(puma) -> pd.DataFrame:
"""Re-uses code from remove_state_code_from_PUMA col in access to subway, call this instead
Possible refactor: apply to dataframe and ensure that re-named column is label \"puma\" """
puma = str(puma)
puma = puma.split(".")[0]
if puma == "nan" or puma == nan:
return nan
elif puma[:2] == "36":
puma = puma[2:]
elif puma[0] != "0":
puma = "0" + puma
return puma
def NYC_PUMA_geographies() -> gp.GeoDataFrame:
res = requests.get(
"https://services5.arcgis.com/GfwWNkhOj9bNBqoJ/arcgis/rest/services/NYC_Public_Use_Microdata_Areas_PUMAs_2010/FeatureServer/0/query?where=1=1&outFields=*&outSR=4326&f=pgeojson"
)
gdf = gp.GeoDataFrame.from_features(res.json()["features"])
gdf = gdf.set_crs(res.json()["crs"]["properties"]["name"])
gdf.rename(columns={"PUMA": "puma"}, inplace=True)
gdf["puma"] = gdf["puma"].apply(clean_PUMAs)
return gdf
PUMAs = NYC_PUMA_geographies()
def assign_PUMA_col(df: pd.DataFrame, lat_col, long_col, geocode_process=None):
df.rename(columns={lat_col: "latitude", long_col: "longitude"}, inplace=True)
df["puma"] = df.apply(assign_PUMA, axis=1, args=(geocode_process,))
print(f"got {df.shape[0]} evictions to assign PUMAs to ")
print(f"assigned PUMAs to {df['puma'].notnull().sum()}")
return df
def assign_PUMA(record: gp.GeoDataFrame, geocode_process):
if pd.notnull(record.latitude) and pd.notnull(record.longitude):
return PUMA_from_coord(record)
if geocode_process:
return geocode_functions[geocode_process](record)
def PUMA_from_coord(record):
"""Don't think I need to make a geodata frame here, shapely object would do"""
record_loc = Point(record.longitude, record.latitude)
matched_PUMA = PUMAs[PUMAs.geometry.contains(record_loc)]
if matched_PUMA.empty:
return None
return matched_PUMA.puma.values[0]
def get_all_NYC_PUMAs():
"""Adopted from code in PUMS_query_manager"""
geo_ids = [
range(4001, 4019), # Brooklyn
range(3701, 3711), # Bronx
range(4101, 4115), # Queens
range(3901, 3904), # Staten Island
range(3801, 3811), # Manhattan
]
rv = []
for borough in geo_ids:
rv.extend(["0" + str(PUMA) for PUMA in borough])
return rv
def get_all_boroughs():
return ["BK", "BX", "MN", "QN", "SI"]
def filter_for_recognized_pumas(df):
"""Written for income restricted indicator but can be used for many other
indicators that have rows by puma but include some non-PUMA rows. Sometimes
we set nrows in read csv/excel but this approach is more flexible"""
return df[df["puma"].isin(get_all_NYC_PUMAs())]
30.541667
192
0.674216
793f82da98f3a6ab3b6583b2f55be105cd018ce6
6,708
py
Python
test/functional/p2p_invalid_tx.py
dev-zeo/bitcoin-pos
5fd0ccf8833a8e8e467f7c765fb6780b0b570e97
[
"MIT"
]
null
null
null
test/functional/p2p_invalid_tx.py
dev-zeo/bitcoin-pos
5fd0ccf8833a8e8e467f7c765fb6780b0b570e97
[
"MIT"
]
null
null
null
test/functional/p2p_invalid_tx.py
dev-zeo/bitcoin-pos
5fd0ccf8833a8e8e467f7c765fb6780b0b570e97
[
"MIT"
]
null
null
null
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import ZeoTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
from data import invalid_txs
class InvalidTxRequestTest(ZeoTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.setup_clean_chain = True
def bootstrap_p2p(self, *, num_connections=1):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
for _ in range(num_connections):
self.nodes[0].add_p2p_connection(P2PDataStore())
def reconnect_p2p(self, **kwargs):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(**kwargs)
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generatetoaddress(100, self.nodes[0].get_deterministic_priv_key().address)
# Iterate through a list of known invalid transaction types, ensuring each is
# rejected. Some are consensus invalid and some just violate policy.
for BadTxTemplate in invalid_txs.iter_all_templates():
self.log.info("Testing invalid transaction: %s", BadTxTemplate.__name__)
template = BadTxTemplate(spend_block=block1)
tx = template.get_tx()
node.p2p.send_txs_and_test(
[tx], node, success=False,
expect_disconnect=template.expect_disconnect,
reject_reason=template.reject_reason,
)
if template.expect_disconnect:
self.log.info("Reconnecting to peer")
self.reconnect_p2p()
# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
self.reconnect_p2p(num_connections=2)
self.log.info('Test orphan transaction handling ... ')
# Create a root transaction that we withhold until all dependent transactions
# are sent out and in the orphan cache
SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
tx_withhold = CTransaction()
tx_withhold.vin.append(CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
tx_withhold.vout.append(CTxOut(nValue=50 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_withhold.calc_sha256()
# Our first orphan tx with some outputs to create further orphan txs
tx_orphan_1 = CTransaction()
tx_orphan_1.vin.append(CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
tx_orphan_1.vout = [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3
tx_orphan_1.calc_sha256()
# A valid transaction with low fee
tx_orphan_2_no_fee = CTransaction()
tx_orphan_2_no_fee.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
tx_orphan_2_no_fee.vout.append(CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
# A valid transaction with sufficient fee
tx_orphan_2_valid = CTransaction()
tx_orphan_2_valid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
tx_orphan_2_valid.vout.append(CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_valid.calc_sha256()
# An invalid transaction with negative fee
tx_orphan_2_invalid = CTransaction()
tx_orphan_2_invalid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
tx_orphan_2_invalid.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
self.log.info('Send the orphans ... ')
# Send valid orphan txs from p2ps[0]
node.p2p.send_txs_and_test([tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid], node, success=False)
# Send invalid tx from p2ps[1]
node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node, success=False)
assert_equal(0, node.getmempoolinfo()['size']) # Mempool should be empty
assert_equal(2, len(node.getpeerinfo())) # p2ps[1] is still connected
self.log.info('Send the withhold tx ... ')
with node.assert_debug_log(expected_msgs=["bad-txns-in-belowout"]):
node.p2p.send_txs_and_test([tx_withhold], node, success=True)
# Transactions that should end up in the mempool
expected_mempool = {
t.hash
for t in [
tx_withhold, # The transaction that is the root for all orphans
tx_orphan_1, # The orphan transaction that splits the coins
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
# Transactions that do not end up in the mempool
# tx_orphan_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
# tx_orphan_invaid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
if __name__ == '__main__':
InvalidTxRequestTest().main()
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ipsec_auth_key_config
import ipsec
class authentication(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/interface/loopback/ipv6/interface-ospfv3-conf/authentication. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure ipsec authentication for the interface.The interface IPsec configuration takes precedence over the area IPsec configuration when an area and an interface within that area use IPsec. Therefore, if you configure IPsec for an interface and an area configuration also exists that includes this interface, the interface's IPsec configuration is used by that interface. However, if you disable IPsec on an interface, IPsec is disabled on the interface even if the interface has its own, specific authentication.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ipsec_auth_key_config','__ipsec',)
_yang_name = 'authentication'
_rest_name = 'authentication'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__ipsec_auth_key_config = YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
self.__ipsec = YANGDynClass(base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'interface', u'loopback', u'ipv6', u'interface-ospfv3-conf', u'authentication']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Loopback', u'ipv6', u'ospf', u'authentication']
def _get_ipsec_auth_key_config(self):
"""
Getter method for ipsec_auth_key_config, mapped from YANG variable /routing_system/interface/loopback/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config (container)
"""
return self.__ipsec_auth_key_config
def _set_ipsec_auth_key_config(self, v, load=False):
"""
Setter method for ipsec_auth_key_config, mapped from YANG variable /routing_system/interface/loopback/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipsec_auth_key_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipsec_auth_key_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipsec_auth_key_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__ipsec_auth_key_config = t
if hasattr(self, '_set'):
self._set()
def _unset_ipsec_auth_key_config(self):
self.__ipsec_auth_key_config = YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
def _get_ipsec(self):
"""
Getter method for ipsec, mapped from YANG variable /routing_system/interface/loopback/ipv6/interface_ospfv3_conf/authentication/ipsec (container)
YANG Description: Configure ipsec authentication for the interface
"""
return self.__ipsec
def _set_ipsec(self, v, load=False):
"""
Setter method for ipsec, mapped from YANG variable /routing_system/interface/loopback/ipv6/interface_ospfv3_conf/authentication/ipsec (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipsec is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipsec() directly.
YANG Description: Configure ipsec authentication for the interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipsec must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__ipsec = t
if hasattr(self, '_set'):
self._set()
def _unset_ipsec(self):
self.__ipsec = YANGDynClass(base=ipsec.ipsec, is_container='container', presence=False, yang_name="ipsec", rest_name="ipsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipsec authentication for the interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
ipsec_auth_key_config = __builtin__.property(_get_ipsec_auth_key_config, _set_ipsec_auth_key_config)
ipsec = __builtin__.property(_get_ipsec, _set_ipsec)
_pyangbind_elements = {'ipsec_auth_key_config': ipsec_auth_key_config, 'ipsec': ipsec, }
62.373494
587
0.743867
793f8382aecc792a01822a24183cee5cb5f842c8
1,011
py
Python
flaskr/db.py
ztaylor2/flask-api
73378bcfefe9ac09f6e7c811c1b9aa690b05d8ec
[
"MIT"
]
null
null
null
flaskr/db.py
ztaylor2/flask-api
73378bcfefe9ac09f6e7c811c1b9aa690b05d8ec
[
"MIT"
]
null
null
null
flaskr/db.py
ztaylor2/flask-api
73378bcfefe9ac09f6e7c811c1b9aa690b05d8ec
[
"MIT"
]
null
null
null
"""Set up the sqlite database."""
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
"""Connect to the database."""
if 'db' not in g:
g.db = sqlite3.connect(
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
"""Close the database."""
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
"""Initialize the database."""
db = get_db()
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
"""Initialize the app."""
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
20.632653
56
0.638971
793f84308769fcb6b5f912c3b8ef38714215d0f6
3,917
py
Python
FHIR_Tester_backend/sandbox/resource_tester.py
ideaworld/FHIR_Tester
62844af2de510b65535df5ae60da03a082097df0
[
"MIT"
]
null
null
null
FHIR_Tester_backend/sandbox/resource_tester.py
ideaworld/FHIR_Tester
62844af2de510b65535df5ae60da03a082097df0
[
"MIT"
]
4
2020-06-05T17:40:18.000Z
2022-02-11T03:38:16.000Z
FHIR_Tester_backend/sandbox/resource_tester.py
bowen1993/FHIR_Tester
62844af2de510b65535df5ae60da03a082097df0
[
"MIT"
]
1
2016-11-22T01:04:16.000Z
2016-11-22T01:04:16.000Z
import os
import sys
pro_dir = os.getcwd()
sys.path.append(pro_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FHIR_Tester.settings")
from services.genomics_test_generator.fhir_genomics_test_gene import *
from services.request_sender import *
from services.create_resource import *
spec_basepath = 'resources/spec/'
resource_basepath = 'resources/json/'
def iter_all_cases(resource_type, all_cases, url,id_dict, access_token=None):
#test right cases
print 'test'
isSuccessful = True
for case in all_cases['right']:
case = set_reference(case,id_dict)
response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
if isinstance(response, dict) and 'issue' in response and response['issue'][0]['severity'] == 'information':
isSuccessful = isSuccessful and True
else:
if isinstance(response, str):
hint += response
elif isinstance(response, dict):
hint += response['issue'][0]['diagnostics']
isSuccessful = isSuccessful and False
print "%s:Proper %s cases tested:%s" % (resource_type, resource_type, 'success' if isSuccessful else 'fail')
isSuccessfulFalse = True
for case_with_info in all_cases['wrong']:
case = case_with_info['case']
response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
if isinstance(response, dict) and 'issue' in response and response['issue'][0]['severity'] == 'information':
isSuccessfulFalse = isSuccessfulFalse and False
else:
isSuccessfulFalse = isSuccessfulFalse and True
print "%s:Improper %s cases tested:%s" % (resource_type, resource_type, 'success' if isSuccessfulFalse else 'fail')
return isSuccessful and isSuccessfulFalse
def test_a_resource(resource_name, url, access_token=None):
print resource_name
#setup
id_dict = setup(url, access_token)
spec_filename = '%s%s.csv' % (spec_basepath, resource_name)
print spec_filename
all_cases = create_all_test_case4type(spec_filename, resource_name)
if not url.endswith('/'):
url += '/'
isSuccessful = iter_all_cases(resource_name, all_cases, '%s%s' % (url, resource_name),id_dict, access_token)
print "%s:All %s cases tested:%s" % (resource_name, resource_name, 'success' if isSuccessful else 'fail')
return
def create_all_test_case4type(resource_spec_filename,resource_type):
#load spec
csv_reader = csv.reader(open(resource_spec_filename, 'r'))
detail_dict = trans_csv_to_dict(csv_reader)
del csv_reader
#generate all cases
test_cases = create_element_test_cases(detail_dict)
right_cases, wrong_cases = create_orthogonal_test_cases(test_cases)
#wrap test cases
all_cases = {}
all_cases['right'] = []
all_cases['wrong'] = []
for case in right_cases:
case['resourceType'] = resource_type
all_cases['right'].append(case)
for case in wrong_cases:
case['case']['resourceType'] = resource_type
all_cases['wrong'].append(case)
#return all cases
return all_cases
def ana_pre_creation_result(raw_info):
processed_info = {}
for key in raw_info:
if raw_info[key] and 'issue' in raw_info[key]:
if raw_info[key]['issue'][0]['severity'] == 'information':
processed_info[key] = True
else:
processed_info[key] = False
return processed_info
def setup(url, access_token=None):
create_res, id_dict = create_pre_resources(url, 'resources', access_token)
pre_resource_result = ana_pre_creation_result(create_res)
# print pre_resource_result
status = True
for key in pre_resource_result:
status = status and pre_resource_result[key]
print "Setup:Setup:%s" % "success" if status else "fail"
return id_dict
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2020-02-21 06:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='pv',
field=models.PositiveIntegerField(default=1),
),
migrations.AddField(
model_name='post',
name='uv',
field=models.PositiveIntegerField(default=1),
),
]
22.615385
57
0.578231
793f84d95f4cea7dc9443c01f8c8128a0d110de4
7,472
py
Python
ctpn/dataset.py
CrazySummerday/ctpn.pytorch
99f6baf2780e550d7b4656ac7a7b90af9ade468f
[
"MIT"
]
38
2019-09-09T07:06:02.000Z
2022-03-07T06:39:11.000Z
ctpn/dataset.py
CrazySummerday/ctpn.pytorch
99f6baf2780e550d7b4656ac7a7b90af9ade468f
[
"MIT"
]
6
2020-09-01T02:31:35.000Z
2021-10-20T08:50:09.000Z
ctpn/dataset.py
CrazySummerday/ctpn.pytorch
99f6baf2780e550d7b4656ac7a7b90af9ade468f
[
"MIT"
]
23
2019-09-11T11:50:42.000Z
2022-01-29T18:22:32.000Z
#-*- coding:utf-8 -*-
import os
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
import xml.etree.ElementTree as ET
from ctpn.utils import cal_rpn
IMAGE_MEAN = [123.68, 116.779, 103.939]
'''
从xml文件中读取图像中的真值框
'''
def readxml(path):
gtboxes = []
xml = ET.parse(path)
for elem in xml.iter():
if 'object' in elem.tag:
for attr in list(elem):
if 'bndbox' in attr.tag:
xmin = int(round(float(attr.find('xmin').text)))
ymin = int(round(float(attr.find('ymin').text)))
xmax = int(round(float(attr.find('xmax').text)))
ymax = int(round(float(attr.find('ymax').text)))
gtboxes.append((xmin, ymin, xmax, ymax))
return np.array(gtboxes)
'''
读取VOC格式数据,返回用于训练的图像、anchor目标框、标签
'''
class VOCDataset(Dataset):
def __init__(self, datadir, labelsdir):
if not os.path.isdir(datadir):
raise Exception('[ERROR] {} is not a directory'.format(datadir))
if not os.path.isdir(labelsdir):
raise Exception('[ERROR] {} is not a directory'.format(labelsdir))
self.datadir = datadir
self.img_names = os.listdir(self.datadir)
self.labelsdir = labelsdir
def __len__(self):
return len(self.img_names)
def generate_gtboxes(self, xml_path, rescale_fac = 1.0):
base_gtboxes = readxml(xml_path)
gtboxes = []
for base_gtbox in base_gtboxes:
xmin, ymin, xmax, ymax = base_gtbox
if rescale_fac > 1.0:
xmin = int(xmin / rescale_fac)
xmax = int(xmax / rescale_fac)
ymin = int(ymin / rescale_fac)
ymax = int(ymax / rescale_fac)
prev = xmin
for i in range(xmin // 16 + 1, xmax // 16 + 1):
next = 16*i-0.5
gtboxes.append((prev, ymin, next, ymax))
prev = next
gtboxes.append((prev, ymin, xmax, ymax))
return np.array(gtboxes)
def __getitem__(self, idx):
img_name = self.img_names[idx]
img_path = os.path.join(self.datadir, img_name)
img = cv2.imread(img_path)
h, w, c = img.shape
rescale_fac = max(h, w) / 1000
if rescale_fac > 1.0:
h = int(h / rescale_fac)
w = int(w / rescale_fac)
img = cv2.resize(img,(w,h))
xml_path = os.path.join(self.labelsdir, img_name.split('.')[0]+'.xml')
gtbox = self.generate_gtboxes(xml_path, rescale_fac)
if np.random.randint(2) == 1:
img = img[:, ::-1, :]
newx1 = w - gtbox[:, 2] - 1
newx2 = w - gtbox[:, 0] - 1
gtbox[:, 0] = newx1
gtbox[:, 2] = newx2
[cls, regr] = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)
regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])
cls = np.expand_dims(cls, axis=0)
m_img = img - IMAGE_MEAN
m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()
cls = torch.from_numpy(cls).float()
regr = torch.from_numpy(regr).float()
return m_img, cls, regr
################################################################################
class ICDARDataset(Dataset):
def __init__(self, datadir, labelsdir):
if not os.path.isdir(datadir):
raise Exception('[ERROR] {} is not a directory'.format(datadir))
if not os.path.isdir(labelsdir):
raise Exception('[ERROR] {} is not a directory'.format(labelsdir))
self.datadir = datadir
self.img_names = os.listdir(self.datadir)
self.labelsdir = labelsdir
def __len__(self):
return len(self.img_names)
def box_transfer(self, coor_lists, rescale_fac = 1.0):
gtboxes = []
for coor_list in coor_lists:
coors_x = [int(coor_list[2*i]) for i in range(4)]
coors_y = [int(coor_list[2*i+1]) for i in range(4)]
xmin = min(coors_x)
xmax = max(coors_x)
ymin = min(coors_y)
ymax = max(coors_y)
if rescale_fac > 1.0:
xmin = int(xmin / rescale_fac)
xmax = int(xmax / rescale_fac)
ymin = int(ymin / rescale_fac)
ymax = int(ymax / rescale_fac)
gtboxes.append((xmin, ymin, xmax, ymax))
return np.array(gtboxes)
def box_transfer_v2(self, coor_lists, rescale_fac = 1.0):
gtboxes = []
for coor_list in coor_lists:
coors_x = [int(coor_list[2 * i]) for i in range(4)]
coors_y = [int(coor_list[2 * i + 1]) for i in range(4)]
xmin = min(coors_x)
xmax = max(coors_x)
ymin = min(coors_y)
ymax = max(coors_y)
if rescale_fac > 1.0:
xmin = int(xmin / rescale_fac)
xmax = int(xmax / rescale_fac)
ymin = int(ymin / rescale_fac)
ymax = int(ymax / rescale_fac)
prev = xmin
for i in range(xmin // 16 + 1, xmax // 16 + 1):
next = 16*i-0.5
gtboxes.append((prev, ymin, next, ymax))
prev = next
gtboxes.append((prev, ymin, xmax, ymax))
return np.array(gtboxes)
def parse_gtfile(self, gt_path, rescale_fac = 1.0):
coor_lists = list()
with open(gt_path, 'r', encoding="utf-8-sig") as f:
content = f.readlines()
for line in content:
coor_list = line.split(',')[:8]
if len(coor_list) == 8:
coor_lists.append(coor_list)
return self.box_transfer_v2(coor_lists, rescale_fac)
def draw_boxes(self,img,cls,base_anchors,gt_box):
for i in range(len(cls)):
if cls[i]==1:
pt1 = (int(base_anchors[i][0]),int(base_anchors[i][1]))
pt2 = (int(base_anchors[i][2]),int(base_anchors[i][3]))
img = cv2.rectangle(img,pt1,pt2,(200,100,100))
for i in range(gt_box.shape[0]):
pt1 = (int(gt_box[i][0]),int(gt_box[i][1]))
pt2 = (int(gt_box[i][2]),int(gt_box[i][3]))
img = cv2.rectangle(img, pt1, pt2, (100, 200, 100))
return img
def __getitem__(self, idx):
img_name = self.img_names[idx]
img_path = os.path.join(self.datadir, img_name)
img = cv2.imread(img_path)
h, w, c = img.shape
rescale_fac = max(h, w) / 1000
if rescale_fac > 1.0:
h = int(h / rescale_fac)
w = int(w / rescale_fac)
img = cv2.resize(img,(w,h))
gt_path = os.path.join(self.labelsdir, img_name.split('.')[0]+'.txt')
gtbox = self.parse_gtfile(gt_path, rescale_fac)
# random flip image
if np.random.randint(2) == 1:
img = img[:, ::-1, :]
newx1 = w - gtbox[:, 2] - 1
newx2 = w - gtbox[:, 0] - 1
gtbox[:, 0] = newx1
gtbox[:, 2] = newx2
[cls, regr] = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)
regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])
cls = np.expand_dims(cls, axis=0)
m_img = img - IMAGE_MEAN
m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()
cls = torch.from_numpy(cls).float()
regr = torch.from_numpy(regr).float()
return m_img, cls, regr
35.751196
80
0.528506
793f853a94a76980afe6ccf94dbf8caab5ef398c
11,669
py
Python
shorttext/stack/stacking.py
trendmicro/PyShortTextCategorization
86d8ad22035243dbeb1c53fe286d6ef8be9a9fd7
[
"MIT"
]
null
null
null
shorttext/stack/stacking.py
trendmicro/PyShortTextCategorization
86d8ad22035243dbeb1c53fe286d6ef8be9a9fd7
[
"MIT"
]
null
null
null
shorttext/stack/stacking.py
trendmicro/PyShortTextCategorization
86d8ad22035243dbeb1c53fe286d6ef8be9a9fd7
[
"MIT"
]
null
null
null
import pickle
import numpy as np
from keras.layers import Dense, Reshape
from keras.models import Sequential
from keras.regularizers import l2
import shorttext.utils.classification_exceptions as e
import shorttext.utils.kerasmodel_io as kerasio
from shorttext.utils.compactmodel_io import CompactIOMachine
# abstract class
class StackedGeneralization:
"""
This is an abstract class for any stacked generalization method. It is an intermediate model
that takes the results of other classifiers as the input features, and perform another classification.
The classifiers must have the :func:`~score` method that takes a string as an input argument.
More references:
David H. Wolpert, "Stacked Generalization," *Neural Netw* 5: 241-259 (1992).
M. Paz Sesmero, Agapito I. Ledezma, Araceli Sanchis, "Generating ensembles of heterogeneous classifiers using Stacked Generalization,"
*WIREs Data Mining and Knowledge Discovery* 5: 21-34 (2015).
"""
def __init__(self, intermediate_classifiers={}):
""" Initialize the stacking class instance.
:param intermediate_classifiers: dictionary, with key being a string, and the values intermediate classifiers, that have the method :func:`~score`, which takes a string as the input argument.
:type intermediate_classifiers: dict
"""
self.classifiers = intermediate_classifiers
self.classlabels = []
self.trained = False
def register_classifiers(self):
""" Register the intermediate classifiers.
It must be run before any training.
:return: None
"""
self.classifier2idx = {}
self.idx2classifier = {}
for idx, key in enumerate(self.classifiers.keys()):
self.classifier2idx[key] = idx
self.idx2classifier[idx] = key
def register_classlabels(self, labels):
""" Register output labels.
Given the labels, it gives an integer as the index for each label.
It is essential for the output model to place.
It must be run before any training.
:param labels: list of output labels
:return: None
:type labels: list
"""
self.classlabels = list(labels)
self.labels2idx = {classlabel: idx for idx, classlabel in enumerate(self.classlabels)}
def add_classifier(self, name, classifier):
""" Add a classifier.
Add a classifier to the class. The classifier must have the method :func:`~score` which
takes a string as an input argument.
:param name: name of the classifier, without spaces and any special characters
:param classifier: instance of a classifier, which has a method :func:`~score` which takes a string as an input argument
:return: None
:type name: str
:type classifier: any class with a method :func:`~score`
"""
self.classifiers[name] = classifier
self.register_classifiers()
def delete_classifier(self, name):
""" Delete a classifier.
:param name: name of the classifier to be deleted
:return: None
:type name: str
:raise: KeyError
"""
del self.classifiers[name]
self.register_classifiers()
def translate_shorttext_intfeature_matrix(self, shorttext):
""" Represent the given short text as the input matrix of the stacking class.
:param shorttext: short text
:return: input matrix of the stacking class
:type shorttext: str
:rtype: numpy.ndarray
"""
feature_matrix = np.zeros((len(self.classifier2idx), len(self.labels2idx)))
for key in self.classifier2idx:
scoredict = self.classifiers[key].score(shorttext)
for label in scoredict:
feature_matrix[self.classifier2idx[key], self.labels2idx[label]] = scoredict[label]
return feature_matrix
def convert_label_to_buckets(self, label):
""" Convert the label into an array of bucket.
Some classification algorithms, especially those of neural networks, have the output
as a serious of buckets with the correct answer being 1 in the correct label, with other being 0.
This method convert the label into the corresponding buckets.
:param label: label
:return: array of buckets
:type label: str
:rtype: numpy.ndarray
"""
buckets = np.zeros(len(self.labels2idx), dtype=np.int)
buckets[self.labels2idx[label]] = 1
return buckets
def convert_traindata_matrix(self, classdict, tobucket=True):
""" Returns a generator that returns the input matrix and the output labels for training.
:param classdict: dictionary of the training data
:param tobucket: whether to convert the label into buckets (Default: True)
:return: array of input matrix, and output labels
:type classdict: dict
:type tobucket: bool
:rtype: tuple
"""
for label in classdict:
y = self.convert_label_to_buckets(label) if tobucket else self.labels2idx[label]
for shorttext in classdict[label]:
X = self.translate_shorttext_intfeature_matrix(shorttext)
yield X, y
def train(self, classdict, *args, **kwargs):
""" Train the stacked generalization.
Not implemented. `NotImplemntedException` raised.
:param classdict: training data
:param args: arguments to be parsed
:param kwargs: arguments to be parsed
:return: None
:type classdict: dict
:type args: dict
:type kwargs: dict
:raise: NotImplementedException
"""
raise e.NotImplementedException()
def score(self, shorttext, *args, **kwargs):
""" Calculate the scores for each class labels.
Not implemented. `NotImplemntedException` raised.
:param shorttext: short text to be scored
:param args: arguments to be parsed
:param kwargs: arguments to be parsed
:return: dictionary of scores for all class labels
:type shorttext: str
:type args: dict
:type kwargs: dict
:rtype: dict
:raise: NotImplementedException
"""
raise e.NotImplementedException()
class LogisticStackedGeneralization(StackedGeneralization, CompactIOMachine):
"""
This class implements logistic regression as the stacked generalizer.
It is an intermediate model
that takes the results of other classifiers as the input features, and perform another classification.
This class saves the stacked logistic model, but not the information of the primary model.
The classifiers must have the :func:`~score` method that takes a string as an input argument.
"""
def __init__(self, intermediate_classifiers={}):
CompactIOMachine.__init__(self,
{'classifier': 'stacked_logistics'},
'stacked_logistics',
['_stackedlogistics.pkl', '_stackedlogistics.h5', '_stackedlogistics.json'])
StackedGeneralization.__init__(self, intermediate_classifiers=intermediate_classifiers)
def train(self, classdict, optimizer='adam', l2reg=0.01, bias_l2reg=0.01, nb_epoch=1000):
""" Train the stacked generalization.
:param classdict: training data
:param optimizer: optimizer to use Options: sgd, rmsprop, adagrad, adadelta, adam, adamax, nadam. (Default: 'adam', for adam optimizer)
:param l2reg: coefficients for L2-regularization (Default: 0.01)
:param bias_l2reg: coefficients for L2-regularization for bias (Default: 0.01)
:param nb_epoch: number of epochs for training (Default: 1000)
:return: None
:type classdict: dict
:type optimizer: str
:type l2reg: float
:type bias_l2reg: float
:type nb_epoch: int
"""
# register
self.register_classifiers()
self.register_classlabels(classdict.keys())
kmodel = Sequential()
kmodel.add(Reshape((len(self.classifier2idx) * len(self.labels2idx),),
input_shape=(len(self.classifier2idx), len(self.labels2idx))))
kmodel.add(Dense(units=len(classdict),
activation='sigmoid',
kernel_regularizer=l2(l2reg),
bias_regularizer=l2(bias_l2reg))
)
kmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)
Xy = [(xone, yone) for xone, yone in self.convert_traindata_matrix(classdict, tobucket=True)]
X = np.array([item[0] for item in Xy])
y = np.array([item[1] for item in Xy])
kmodel.fit(X, y, epochs=nb_epoch)
self.model = kmodel
self.trained = True
def score(self, shorttext):
""" Calculate the scores for all the class labels for the given short sentence.
Given a short sentence, calculate the classification scores for all class labels,
returned as a dictionary with key being the class labels, and values being the scores.
If the short sentence is empty, or if other numerical errors occur, the score will be `numpy.nan`.
If neither :func:`~train` nor :func:`~loadmodel` was run, it will raise `ModelNotTrainedException`.
:param shorttext: a short sentence
:return: a dictionary with keys being the class labels, and values being the corresponding classification scores
:type shorttext: str
:rtype: dict
"""
if not self.trained:
raise e.ModelNotTrainedException()
input_matrix = self.translate_shorttext_intfeature_matrix(shorttext)
prediction = self.model.predict(np.array([input_matrix]))
scoredict = {label: prediction[0][idx] for idx, label in enumerate(self.classlabels)}
return scoredict
def savemodel(self, nameprefix):
""" Save the logistic stacked model into files.
Save the stacked model into files. Note that the intermediate classifiers
are not saved. Users are advised to save those classifiers separately.
If neither :func:`~train` nor :func:`~loadmodel` was run, it will raise `ModelNotTrainedException`.
:param nameprefix: prefix of the files
:return: None
:raise: ModelNotTrainedException
:type nameprefix: str
"""
if not self.trained:
raise e.ModelNotTrainedException()
stackedmodeldict = {'classifiers': self.classifier2idx,
'classlabels': self.classlabels}
pickle.dump(stackedmodeldict, open(nameprefix+'_stackedlogistics.pkl', 'wb'))
kerasio.save_model(nameprefix+'_stackedlogistics', self.model)
def loadmodel(self, nameprefix):
""" Load the model with the given prefix.
Load the model with the given prefix of their paths. Note that the intermediate
classifiers are not loaded, and users are required to load them separately.
:param nameprefix: prefix of the model files
:return: None
:type nameprefix: str
"""
stackedmodeldict = pickle.load(open(nameprefix+'_stackedlogistics.pkl', 'rb'))
self.register_classlabels(stackedmodeldict['classlabels'])
self.classifier2idx = stackedmodeldict['classifiers']
self.idx2classifier = {val: key for key, val in self.classifier2idx.items()}
self.model = kerasio.load_model(nameprefix+'_stackedlogistics')
self.trained = True
#!/usr/bin/env python
# Copyright (c) 2018 The Arnak developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.authproxy import JSONRPCException
from test_framework.mininode import NodeConn, NetworkThread, CInv, \
msg_mempool, msg_getdata, msg_tx, mininode_lock, SAPLING_PROTO_VERSION
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, fail, \
initialize_chain_clean, p2p_port, start_nodes, sync_blocks, sync_mempools
from tx_expiry_helper import TestNode, create_transaction
from binascii import hexlify
class TxExpiringSoonTest(BitcoinTestFramework):
def setup_chain(self):
print "Initializing test directory " + self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
# We don't connect node 2
def send_transaction(self, testnode, block, address, expiry_height):
tx = create_transaction(self.nodes[0],
block,
address,
10.0,
expiry_height)
testnode.send_message(msg_tx(tx))
# Sync up with node after p2p messages delivered
testnode.sync_with_ping()
# Sync nodes 0 and 1
sync_blocks(self.nodes[:2])
sync_mempools(self.nodes[:2])
return tx
def verify_inv(self, testnode, tx):
# Make sure we are synced before sending the mempool message
testnode.sync_with_ping()
# Send p2p message "mempool" to receive contents from arnakd node in "inv" message
with mininode_lock:
testnode.last_inv = None
testnode.send_message(msg_mempool())
# Sync up with node after p2p messages delivered
testnode.sync_with_ping()
with mininode_lock:
msg = testnode.last_inv
assert_equal(len(msg.inv), 1)
assert_equal(tx.sha256, msg.inv[0].hash)
def send_data_message(self, testnode, tx):
# Send p2p message "getdata" to verify tx gets sent in "tx" message
getdatamsg = msg_getdata()
getdatamsg.inv = [CInv(1, tx.sha256)]
with mininode_lock:
testnode.last_notfound = None
testnode.last_tx = None
testnode.send_message(getdatamsg)
def verify_last_tx(self, testnode, tx):
# Sync up with node after p2p messages delivered
testnode.sync_with_ping()
# Verify data received in "tx" message is for tx
with mininode_lock:
incoming_tx = testnode.last_tx.tx
incoming_tx.rehash()
assert_equal(tx.sha256, incoming_tx.sha256)
def run_test(self):
testnode0 = TestNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
testnode0, "regtest", SAPLING_PROTO_VERSION))
testnode0.add_connection(connections[0])
# Start up network handling in another thread
NetworkThread().start()
testnode0.wait_for_verack()
# Verify mininodes are connected to arnakd nodes
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
assert_equal(0, peerinfo[0]["banscore"])
# Mine some blocks so we can spend
coinbase_blocks = self.nodes[0].generate(200)
node_address = self.nodes[0].getnewaddress()
# Sync nodes 0 and 1
sync_blocks(self.nodes[:2])
sync_mempools(self.nodes[:2])
# Verify block count
assert_equal(self.nodes[0].getblockcount(), 200)
assert_equal(self.nodes[1].getblockcount(), 200)
assert_equal(self.nodes[2].getblockcount(), 0)
# Mininodes send expiring soon transaction in "tx" message to arnakd node
self.send_transaction(testnode0, coinbase_blocks[0], node_address, 203)
# Assert that the tx is not in the mempool (expiring soon)
assert_equal([], self.nodes[0].getrawmempool())
assert_equal([], self.nodes[1].getrawmempool())
assert_equal([], self.nodes[2].getrawmempool())
# Mininodes send transaction in "tx" message to arnakd node
tx2 = self.send_transaction(testnode0, coinbase_blocks[1], node_address, 204)
# tx2 is not expiring soon
assert_equal([tx2.hash], self.nodes[0].getrawmempool())
assert_equal([tx2.hash], self.nodes[1].getrawmempool())
# node 2 is isolated
assert_equal([], self.nodes[2].getrawmempool())
# Verify txid for tx2
self.verify_inv(testnode0, tx2)
self.send_data_message(testnode0, tx2)
self.verify_last_tx(testnode0, tx2)
# Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
for blkhash in coinbase_blocks:
blk = self.nodes[0].getblock(blkhash, 0)
self.nodes[2].submitblock(blk)
self.nodes[2].generate(1)
# Verify block count
assert_equal(self.nodes[0].getblockcount(), 200)
assert_equal(self.nodes[1].getblockcount(), 200)
assert_equal(self.nodes[2].getblockcount(), 201)
# Reconnect node 2 to the network
connect_nodes_bi(self.nodes, 0, 2)
# Set up test node for node 2
testnode2 = TestNode()
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2],
testnode2, "regtest", SAPLING_PROTO_VERSION))
testnode2.add_connection(connections[-1])
# Verify block count
sync_blocks(self.nodes[:3])
assert_equal(self.nodes[0].getblockcount(), 201)
assert_equal(self.nodes[1].getblockcount(), 201)
assert_equal(self.nodes[2].getblockcount(), 201)
# Verify contents of mempool
assert_equal([tx2.hash], self.nodes[0].getrawmempool())
assert_equal([tx2.hash], self.nodes[1].getrawmempool())
assert_equal([], self.nodes[2].getrawmempool())
# Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
try:
rawtx2 = hexlify(tx2.serialize())
self.nodes[2].sendrawtransaction(rawtx2)
fail("Sending transaction should have failed")
except JSONRPCException as e:
assert_equal(
"tx-expiring-soon: expiryheight is 204 but should be at least 205 to avoid transaction expiring soon",
e.error['message']
)
self.send_data_message(testnode0, tx2)
# Sync up with node after p2p messages delivered
testnode0.sync_with_ping()
# Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
with mininode_lock:
assert_equal(testnode0.last_tx, None)
# Verify mininode received a "notfound" message containing the txid of tx2
with mininode_lock:
msg = testnode0.last_notfound
assert_equal(len(msg.inv), 1)
assert_equal(tx2.sha256, msg.inv[0].hash)
# Create a transaction to verify that processing of "getdata" messages is functioning
tx3 = self.send_transaction(testnode0, coinbase_blocks[2], node_address, 999)
self.send_data_message(testnode0, tx3)
self.verify_last_tx(testnode0, tx3)
# Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
self.verify_inv(testnode0, tx3)
self.verify_inv(testnode2, tx3)
# Verify contents of mempool
assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))
# Verify banscore for nodes are still zero
assert_equal(0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
assert_equal(0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
TxExpiringSoonTest().main()
39.762791
118
0.64721
793f8755a15c1e97f67c9f4997dfbe6f73a9bb8c
6,413
py
Python
docs/conf.py
dwgoltra/pyjanitor
c5e1a407ec098dbca411118c67090c85d5e687c7
[
"MIT"
]
null
null
null
docs/conf.py
dwgoltra/pyjanitor
c5e1a407ec098dbca411118c67090c85d5e687c7
[
"MIT"
]
null
null
null
docs/conf.py
dwgoltra/pyjanitor
c5e1a407ec098dbca411118c67090c85d5e687c7
[
"MIT"
]
null
null
null
"""Sphinx configuration."""
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../examples"))
# Make a symlink in our sphinx source directory to the top-level
# examples/notebooks directory so we can include notebooks in the doc
notebooks = Path("./notebooks")
if not notebooks.exists():
print("Making symlink to ../examples/notebooks")
notebooks.symlink_to("../examples/notebooks")
# -- Project information -----------------------------------------------------
project = "pyjanitor"
now = datetime.datetime.now()
CurrentYear = str(now.year)
copyright = CurrentYear + ", PyJanitor devs"
author = "Eric J. Ma"
# The short X.Y version
version = "0.1.0"
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinxcontrib.fulltoc",
"nbsphinx",
"sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".md", ".rst", ".ipynb"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"logo": "logo_title.svg"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
"**": ["about.html", "navigation.html", "relations.html", "searchbox.html"]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pyjanitordoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pyjanitor.tex",
"pyjanitor Documentation",
"Eric J. Ma",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyjanitor", "pyjanitor Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyjanitor",
"pyjanitor Documentation",
author,
"pyjanitor",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"https://docs.python.org/": None,
"https://pandas.pydata.org/pandas-docs/stable": None,
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Other options -----------------------------------------------------------
autosummary_generate = True # Make _autosummary files and include them
30.25
79
0.637455
793f8777506f2aa0b78bccc5a959964301cedafb
20,726
py
Python
pyPS4Controller/controller.py
sheeshee/pyPS4Controller
e9dc88292e6d79c8428ecfa22330236167b363c6
[
"MIT"
]
null
null
null
pyPS4Controller/controller.py
sheeshee/pyPS4Controller
e9dc88292e6d79c8428ecfa22330236167b363c6
[
"MIT"
]
null
null
null
pyPS4Controller/controller.py
sheeshee/pyPS4Controller
e9dc88292e6d79c8428ecfa22330236167b363c6
[
"MIT"
]
null
null
null
import os
import struct
import time
class Event:
def __init__(self, button_id, button_type, value, connecting_using_ds4drv):
self.button_id = button_id
self.button_type = button_type
self.value = value
self.connecting_using_ds4drv = connecting_using_ds4drv
# L joystick group #
def L3_event(self): # L3 has the same mapping on ds4drv as it does when connecting to bluetooth directly
return self.button_type == 2 and self.button_id in [1, 0]
def L3_at_rest(self):
return self.button_id in [1, 0] and self.value == 0
def L3_up(self):
return self.button_id == 1 and self.value < 0
def L3_down(self):
return self.button_id == 1 and self.value > 0
def L3_left(self):
return self.button_id == 0 and self.value < 0
def L3_right(self):
return self.button_id == 0 and self.value > 0
def L3_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 11 and self.button_type == 1 and self.value == 1
return False # cant identify this event when connected through ds4drv
def L3_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 11 and self.button_type == 1 and self.value == 0
return False # cant identify this event when connected through ds4drv
# R joystick group #
def R3_event(self):
if not self.connecting_using_ds4drv:
return self.button_type == 2 and self.button_id in [4, 3]
return self.button_type == 2 and self.button_id in [5, 2]
def R3_at_rest(self):
if not self.connecting_using_ds4drv:
return self.button_id in [4, 3] and self.value == 0
return self.button_id in [2, 5] and self.value == 0
def R3_up(self):
if not self.connecting_using_ds4drv:
return self.button_id == 4 and self.value < 0
return self.button_id == 5 and self.value < 0
def R3_down(self):
if not self.connecting_using_ds4drv:
return self.button_id == 4 and self.value > 0
return self.button_id == 5 and self.value > 0
def R3_left(self):
if not self.connecting_using_ds4drv:
return self.button_id == 3 and self.value < 0
return self.button_id == 2 and self.value < 0
def R3_right(self):
if not self.connecting_using_ds4drv:
return self.button_id == 3 and self.value > 0
return self.button_id == 2 and self.value > 0
def R3_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 12 and self.button_type == 1 and self.value == 1
return False # cant identify this event when connected through ds4drv
def R3_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 12 and self.button_type == 1 and self.value == 0
return False # cant identify this event when connected through ds4drv
# Square / Triangle / Circle / X Button group #
def circle_pressed(self):
return self.button_id == 2 and self.button_type == 1 and self.value == 1
def circle_released(self):
return self.button_id == 2 and self.button_type == 1 and self.value == 0
def x_pressed(self):
return self.button_id == 1 and self.button_type == 1 and self.value == 1
def x_released(self):
return self.button_id == 1 and self.button_type == 1 and self.value == 0
def triangle_pressed(self):
return self.button_id == 3 and self.button_type == 1 and self.value == 1
def triangle_released(self):
return self.button_id == 3 and self.button_type == 1 and self.value == 0
def square_pressed(self):
return self.button_id == 0 and self.button_type == 1 and self.value == 1
def square_released(self):
return self.button_id == 0 and self.button_type == 1 and self.value == 0
def options_pressed(self):
return self.button_id == 9 and self.button_type == 1 and self.value == 1
def options_released(self):
return self.button_id == 9 and self.button_type == 1 and self.value == 0
def share_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 8 and self.button_type == 1 and self.value == 1
return False # cant identify this event when connected through ds4drv
def share_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 8 and self.button_type == 1 and self.value == 0
return False # cant identify this event when connected through ds4drv
# N1 group #
def L1_pressed(self):
return self.button_id == 4 and self.button_type == 1 and self.value == 1
def L1_released(self):
return self.button_id == 4 and self.button_type == 1 and self.value == 0
def R1_pressed(self):
return self.button_id == 5 and self.button_type == 1 and self.value == 1
def R1_released(self):
return self.button_id == 5 and self.button_type == 1 and self.value == 0
# N2 group #
def L2_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 2 and self.button_type == 2 and (32767 >= self.value >= -32766)
return self.button_id == 3 and self.button_type == 2 and (32767 >= self.value >= -32766)
def L2_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 2 and self.button_type == 2 and self.value == -32767
return self.button_id == 3 and self.button_type == 2 and self.value == -32767
def R2_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 5 and self.button_type == 2 and (32767 >= self.value >= -32766)
return self.button_id == 4 and self.button_type == 2 and (32767 >= self.value >= -32766)
def R2_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 5 and self.button_type == 2 and self.value == -32767
return self.button_id == 4 and self.button_type == 2 and self.value == -32767
# up / down arrows #
def up_arrow_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 7 and self.button_type == 2 and self.value == -32767
return self.button_id == 10 and self.button_type == 2 and self.value == -32767
def down_arrow_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 7 and self.button_type == 2 and self.value == 32767
return self.button_id == 10 and self.button_type == 2 and self.value == 32767
def up_down_arrow_released(self):
# arrow buttons on release are not distinguishable and if you think about it,
# they are following same principle as the joystick buttons which only have 1
# state at rest which is shared between left/ right / up /down inputs
if not self.connecting_using_ds4drv:
return self.button_id == 7 and self.button_type == 2 and self.value == 0
return self.button_id == 10 and self.button_type == 2 and self.value == 0
# left / right arrows #
def left_arrow_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 6 and self.button_type == 2 and self.value == -32767
return self.button_id == 9 and self.button_type == 2 and self.value == -32767
def right_arrow_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 6 and self.button_type == 2 and self.value == 32767
return self.button_id == 9 and self.button_type == 2 and self.value == 32767
def left_right_arrow_released(self):
# arrow buttons on release are not distinguishable and if you think about it,
# they are following same principle as the joystick buttons which only have 1
# state at rest which is shared between left/ right / up /down inputs
if not self.connecting_using_ds4drv:
return self.button_id == 6 and self.button_type == 2 and self.value == 0
return self.button_id == 9 and self.button_type == 2 and self.value == 0
def playstation_button_pressed(self):
if not self.connecting_using_ds4drv:
return self.button_id == 10 and self.button_type == 1 and self.value == 1
return False # cant identify this event when connected through ds4drv
def playstation_button_released(self):
if not self.connecting_using_ds4drv:
return self.button_id == 10 and self.button_type == 1 and self.value == 0
return False # cant identify this event when connected through ds4drv
class Actions:
"""
Actions are inherited in the Controller class.
In order to bind to the controller events, subclass the Controller class and
override desired action events in this class.
"""
def __init__(self):
return
def on_x_press(self):
print("on_x_press")
def on_x_release(self):
print("on_x_release")
def on_triangle_press(self):
print("on_triangle_press")
def on_triangle_release(self):
print("on_triangle_release")
def on_circle_press(self):
print("on_circle_press")
def on_circle_release(self):
print("on_circle_release")
def on_square_press(self):
print("on_square_press")
def on_square_release(self):
print("on_square_release")
def on_L1_press(self):
print("on_L1_press")
def on_L1_release(self):
print("on_L1_release")
def on_L2_press(self, value):
print("on_L2_press: {}".format(value))
def on_L2_release(self):
print("on_L2_release")
def on_R1_press(self):
print("on_R1_press")
def on_R1_release(self):
print("on_R1_release")
def on_R2_press(self, value):
print("on_R2_press: {}".format(value))
def on_R2_release(self):
print("on_R2_release")
def on_up_arrow_press(self):
print("on_up_arrow_press")
def on_up_down_arrow_release(self):
print("on_up_down_arrow_release")
def on_down_arrow_press(self):
print("on_down_arrow_press")
def on_left_arrow_press(self):
print("on_left_arrow_press")
def on_left_right_arrow_release(self):
print("on_left_right_arrow_release")
def on_right_arrow_press(self):
print("on_right_arrow_press")
def on_L3_up(self, value):
print("on_L3_up: {}".format(value))
def on_L3_down(self, value):
print("on_L3_down: {}".format(value))
def on_L3_left(self, value):
print("on_L3_left: {}".format(value))
def on_L3_right(self, value):
print("on_L3_right: {}".format(value))
def on_L3_at_rest(self):
"""L3 joystick is at rest after the joystick was moved and let go off"""
print("on_L3_at_rest")
def on_L3_press(self):
"""L3 joystick is clicked. This event is only detected when connecting without ds4drv"""
print("on_L3_press")
def on_L3_release(self):
"""L3 joystick is released after the click. This event is only detected when connecting without ds4drv"""
print("on_L3_release")
def on_R3_up(self, value):
print("on_R3_up: {}".format(value))
def on_R3_down(self, value):
print("on_R3_down: {}".format(value))
def on_R3_left(self, value):
print("on_R3_left: {}".format(value))
def on_R3_right(self, value):
print("on_R3_right: {}".format(value))
def on_R3_at_rest(self):
"""R3 joystick is at rest after the joystick was moved and let go off"""
print("on_R3_at_rest")
def on_R3_press(self):
"""R3 joystick is clicked. This event is only detected when connecting without ds4drv"""
print("on_R3_press")
def on_R3_release(self):
"""R3 joystick is released after the click. This event is only detected when connecting without ds4drv"""
print("on_R3_release")
def on_options_press(self):
print("on_options_press")
def on_options_release(self):
print("on_options_release")
def on_share_press(self):
"""this event is only detected when connecting without ds4drv"""
print("on_share_press")
def on_share_release(self):
"""this event is only detected when connecting without ds4drv"""
print("on_share_release")
def on_playstation_button_press(self):
"""this event is only detected when connecting without ds4drv"""
print("on_playstation_button_press")
def on_playstation_button_release(self):
"""this event is only detected when connecting without ds4drv"""
print("on_playstation_button_release")
class Controller(Actions):
def __init__(
self, interface, connecting_using_ds4drv=True,
event_definition=None, event_format=None
):
"""
Initiate controller instance that is capable of listening to all events on specified input interface
:param interface: STRING aka /dev/input/js0 or any other PS4 Duelshock controller interface.
You can see all available interfaces with a command "ls -la /dev/input/"
:param connecting_using_ds4drv: BOOLEAN. If you are connecting your controller using ds4drv, then leave it set
to True. Otherwise if you are connecting directly via directly via
bluetooth/bluetoothctl, set it to False otherwise the controller
button mapping will be off.
"""
Actions.__init__(self)
self.stop = False
self.is_connected = False
self.interface = interface
self.connecting_using_ds4drv = connecting_using_ds4drv
self.debug = False # If you want to see raw event stream, set this to True.
self.black_listed_buttons = [] # set a list of blocked buttons if you dont want to process their events
if self.connecting_using_ds4drv and event_definition is None:
# when device is connected via ds4drv its sending hundreds of events for those button IDs
# thus they are blacklisted by default. Feel free to adjust this list to your linking when sub-classing
self.black_listed_buttons += [6, 7, 8, 11, 12, 13]
self.event_definition = event_definition if event_definition else Event
self.event_format = event_format if event_format else "LhBB"
self.event_size = struct.calcsize(self.event_format)
def listen(self, timeout=30, on_connect=None, on_disconnect=None):
"""
Start listening for events on a given self.interface
:param timeout: INT, seconds. How long you want to wait for the self.interface.
This allows you to start listening and connect your controller after the fact.
If self.interface does not become available in N seconds, the script will exit with exit code 1.
:param on_connect: function object, allows to register a call back when connection is established
:param on_disconnect: function object, allows to register a call back when connection is lost
:return: None
"""
def on_disconnect_callback():
self.is_connected = False
if on_disconnect is not None:
on_disconnect()
def on_connect_callback():
self.is_connected = True
if on_connect is not None:
on_connect()
def wait_for_interface():
print("Waiting for interface: {} to become available . . .".format(self.interface))
for i in range(timeout):
if os.path.exists(self.interface):
print("Successfully bound to: {}.".format(self.interface))
on_connect_callback()
return
time.sleep(1)
print("Timeout({} sec). Interface not available.".format(timeout))
exit(1)
def read_events():
try:
return _file.read(self.event_size)
except IOError:
print("Interface lost. Device disconnected?")
on_disconnect_callback()
exit(1)
wait_for_interface()
while not self.stop:
try:
_file = open(self.interface, "rb")
event = read_events()
while event:
(*tv_sec, value, button_type, button_id) = struct.unpack(self.event_format, event)
if self.debug:
print("button_id: {} button_type: {} value: {}".format(button_id, button_type, value))
if button_id not in self.black_listed_buttons:
self.__handle_event(button_id=button_id, button_type=button_type, value=value)
event = read_events()
except KeyboardInterrupt:
print("\nExiting (Ctrl + C)")
on_disconnect_callback()
exit(1)
def __handle_event(self, button_id, button_type, value):
event = self.event_definition(button_id=button_id,
button_type=button_type,
value=value,
connecting_using_ds4drv=self.connecting_using_ds4drv)
if event.R3_event():
if event.R3_at_rest():
self.on_R3_at_rest()
elif event.R3_right():
self.on_R3_right(value)
elif event.R3_left():
self.on_R3_left(value)
elif event.R3_up():
self.on_R3_up(value)
elif event.R3_down():
self.on_R3_down(value)
elif event.L3_event():
if event.L3_at_rest():
self.on_L3_at_rest()
elif event.L3_up():
self.on_L3_up(value)
elif event.L3_down():
self.on_L3_down(value)
elif event.L3_left():
self.on_L3_left(value)
elif event.L3_right():
self.on_L3_right(value)
elif event.circle_pressed():
self.on_circle_press()
elif event.circle_released():
self.on_circle_release()
elif event.x_pressed():
self.on_x_press()
elif event.x_released():
self.on_x_release()
elif event.triangle_pressed():
self.on_triangle_press()
elif event.triangle_released():
self.on_triangle_release()
elif event.square_pressed():
self.on_square_press()
elif event.square_released():
self.on_square_release()
elif event.L1_pressed():
self.on_L1_press()
elif event.L1_released():
self.on_L1_release()
elif event.L2_pressed():
self.on_L2_press(value)
elif event.L2_released():
self.on_L2_release()
elif event.R1_pressed():
self.on_R1_press()
elif event.R1_released():
self.on_R1_release()
elif event.R2_pressed():
self.on_R2_press(value)
elif event.R2_released():
self.on_R2_release()
elif event.options_pressed():
self.on_options_press()
elif event.options_released():
self.on_options_release()
elif event.left_right_arrow_released():
self.on_left_right_arrow_release()
elif event.up_down_arrow_released():
self.on_up_down_arrow_release()
elif event.left_arrow_pressed():
self.on_left_arrow_press()
elif event.right_arrow_pressed():
self.on_right_arrow_press()
elif event.up_arrow_pressed():
self.on_up_arrow_press()
elif event.down_arrow_pressed():
self.on_down_arrow_press()
elif event.playstation_button_pressed():
self.on_playstation_button_press()
elif event.playstation_button_released():
self.on_playstation_button_release()
elif event.share_pressed():
self.on_share_press()
elif event.share_released():
self.on_share_release()
elif event.R3_pressed():
self.on_R3_press()
elif event.R3_released():
self.on_R3_release()
elif event.L3_pressed():
self.on_L3_press()
elif event.L3_released():
self.on_L3_release()
"""
FactSet SCIM API
FactSet's SCIM API implementation. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.ProcuretoPaySCIM.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.ProcuretoPaySCIM.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.ProcuretoPaySCIM.model.location_resource_meta import LocationResourceMeta
from fds.sdk.ProcuretoPaySCIM.model.location_resource_reference import LocationResourceReference
globals()['LocationResourceMeta'] = LocationResourceMeta
globals()['LocationResourceReference'] = LocationResourceReference
class LocationResource(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'schemas': ([str],), # noqa: E501
'id': (str,), # noqa: E501
'external_id': (str,), # noqa: E501
'name': (str,), # noqa: E501
'description': (str,), # noqa: E501
'address1': (str,), # noqa: E501
'address2': (str,), # noqa: E501
'address3': (str,), # noqa: E501
'locality': (str,), # noqa: E501
'region': (str,), # noqa: E501
'postal_code': (str,), # noqa: E501
'country': (str,), # noqa: E501
'phone_number': (str,), # noqa: E501
'main_location': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501
'meta': (LocationResourceMeta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'schemas': 'schemas', # noqa: E501
'id': 'id', # noqa: E501
'external_id': 'externalId', # noqa: E501
'name': 'name', # noqa: E501
'description': 'description', # noqa: E501
'address1': 'address1', # noqa: E501
'address2': 'address2', # noqa: E501
'address3': 'address3', # noqa: E501
'locality': 'locality', # noqa: E501
'region': 'region', # noqa: E501
'postal_code': 'postalCode', # noqa: E501
'country': 'country', # noqa: E501
'phone_number': 'phoneNumber', # noqa: E501
'main_location': 'mainLocation', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'name', # noqa: E501
'description', # noqa: E501
'address1', # noqa: E501
'address2', # noqa: E501
'address3', # noqa: E501
'locality', # noqa: E501
'region', # noqa: E501
'postal_code', # noqa: E501
'country', # noqa: E501
'phone_number', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""LocationResource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
schemas ([str]): [optional] # noqa: E501
id (str): [optional] # noqa: E501
external_id (str): [optional] # noqa: E501
name (str): Name of the location.. [optional] # noqa: E501
description (str): Description of the location.. [optional] # noqa: E501
address1 (str): First line of location's address.. [optional] # noqa: E501
address2 (str): Second line of location's address.. [optional] # noqa: E501
address3 (str): Third line of location's address.. [optional] # noqa: E501
locality (str): City of location.. [optional] # noqa: E501
region (str): State or province of location.. [optional] # noqa: E501
postal_code (str): Postal code of location.. [optional] # noqa: E501
country (str): Country of location.. [optional] # noqa: E501
phone_number (str): Phone number of location.. [optional] # noqa: E501
main_location (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501
meta (LocationResourceMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""LocationResource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
schemas ([str]): [optional] # noqa: E501
id (str): [optional] # noqa: E501
external_id (str): [optional] # noqa: E501
name (str): Name of the location.. [optional] # noqa: E501
description (str): Description of the location.. [optional] # noqa: E501
address1 (str): First line of location's address.. [optional] # noqa: E501
address2 (str): Second line of location's address.. [optional] # noqa: E501
address3 (str): Third line of location's address.. [optional] # noqa: E501
locality (str): City of location.. [optional] # noqa: E501
region (str): State or province of location.. [optional] # noqa: E501
postal_code (str): Postal code of location.. [optional] # noqa: E501
country (str): Country of location.. [optional] # noqa: E501
phone_number (str): Phone number of location.. [optional] # noqa: E501
main_location (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501
meta (LocationResourceMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
46.365559
121
0.56832
793f90dccd966192aaadf4aedaaabc8ac8097a25
4,944
py
Python
test/functional/p2p_node_network_limited.py
KiPa-SuJi/PaydayCoin-Core
d807d95550d955bfa9ffda2b39cad745422224e5
[
"MIT"
]
2
2020-06-12T10:12:49.000Z
2020-07-31T19:43:09.000Z
test/functional/p2p_node_network_limited.py
KiPa-SuJi/PaydayCoin-Core
d807d95550d955bfa9ffda2b39cad745422224e5
[
"MIT"
]
null
null
null
test/functional/p2p_node_network_limited.py
KiPa-SuJi/PaydayCoin-Core
d807d95550d955bfa9ffda2b39cad745422224e5
[
"MIT"
]
1
2020-12-04T13:34:46.000Z
2020-12-04T13:34:46.000Z
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The PaydayCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import PaydayCoinTestFramework
from test_framework.util import (
assert_equal,
disconnect_nodes,
connect_nodes_bi,
wait_until,
)
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(PaydayCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
self.sync_blocks()
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
self.sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
40.859504
121
0.694175
793f911c9e5bdd7d6fcd8d614d9c8555f09cb406
16,127
py
Python
python/ray/serve/config.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[
"Apache-2.0"
]
22
2018-05-08T05:52:34.000Z
2020-04-01T10:09:55.000Z
python/ray/serve/config.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[
"Apache-2.0"
]
51
2018-05-17T05:55:28.000Z
2020-03-18T06:49:49.000Z
python/ray/serve/config.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[
"Apache-2.0"
]
10
2018-04-27T10:50:59.000Z
2020-02-24T02:41:43.000Z
import inspect
import json
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import pydantic
from google.protobuf.json_format import MessageToDict
from pydantic import (
BaseModel,
NonNegativeFloat,
PositiveFloat,
NonNegativeInt,
PositiveInt,
validator,
)
from ray import cloudpickle
from ray.serve.constants import (
DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S,
DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S,
DEFAULT_HEALTH_CHECK_PERIOD_S,
DEFAULT_HEALTH_CHECK_TIMEOUT_S,
DEFAULT_HTTP_HOST,
DEFAULT_HTTP_PORT,
)
from ray.serve.generated.serve_pb2 import (
DeploymentConfig as DeploymentConfigProto,
DeploymentLanguage,
AutoscalingConfig as AutoscalingConfigProto,
ReplicaConfig as ReplicaConfigProto,
)
from ray.serve.utils import ServeEncoder
class AutoscalingConfig(BaseModel):
# Please keep these options in sync with those in
# `src/ray/protobuf/serve.proto`.
# Publicly exposed options
min_replicas: NonNegativeInt = 1
max_replicas: PositiveInt = 1
target_num_ongoing_requests_per_replica: NonNegativeInt = 1
# Private options below.
# Metrics scraping options
# How often to scrape for metrics
metrics_interval_s: PositiveFloat = 10.0
# Time window to average over for metrics.
look_back_period_s: PositiveFloat = 30.0
# Internal autoscaling configuration options
# Multiplicative "gain" factor to limit scaling decisions
smoothing_factor: PositiveFloat = 1.0
# How frequently to make autoscaling decisions
# loop_period_s: float = CONTROL_LOOP_PERIOD_S
# How long to wait before scaling down replicas
downscale_delay_s: NonNegativeFloat = 600.0
# How long to wait before scaling up replicas
upscale_delay_s: NonNegativeFloat = 30.0
@validator("max_replicas")
def max_replicas_greater_than_or_equal_to_min_replicas(cls, v, values):
if "min_replicas" in values and v < values["min_replicas"]:
raise ValueError(
f"""max_replicas ({v}) must be greater than """
f"""or equal to min_replicas """
f"""({values["min_replicas"]})!"""
)
return v
# TODO(architkulkarni): implement below
# The number of replicas to start with when creating the deployment
# initial_replicas: int = 1
# The num_ongoing_requests_per_replica error ratio (desired / current)
# threshold for overriding `upscale_delay_s`
# panic_mode_threshold: float = 2.0
# TODO(architkulkarni): Add reasonable defaults
class DeploymentConfig(BaseModel):
"""Configuration options for a deployment, to be set by the user.
Args:
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this deployment. Defaults to 1.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this deployment without receiving
a response. Defaults to 100.
user_config (Optional[Any]): Arguments to pass to the reconfigure
method of the deployment. The reconfigure method is called if
user_config is not None.
graceful_shutdown_wait_loop_s (Optional[float]): Duration
that deployment replicas will wait until there is no more work to
be done before shutting down.
graceful_shutdown_timeout_s (Optional[float]):
Controller waits for this duration to forcefully kill the replica
for shutdown.
health_check_period_s (Optional[float]):
Frequency at which the controller will health check replicas.
health_check_timeout_s (Optional[float]):
Timeout that the controller will wait for a response from the
replica's health check before marking it unhealthy.
"""
num_replicas: PositiveInt = 1
max_concurrent_queries: Optional[int] = None
user_config: Any = None
graceful_shutdown_timeout_s: NonNegativeFloat = (
DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S # noqa: E501
)
graceful_shutdown_wait_loop_s: NonNegativeFloat = (
DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S # noqa: E501
)
health_check_period_s: PositiveFloat = DEFAULT_HEALTH_CHECK_PERIOD_S
health_check_timeout_s: PositiveFloat = DEFAULT_HEALTH_CHECK_TIMEOUT_S
autoscaling_config: Optional[AutoscalingConfig] = None
# This flag is used to let replica know they are deplyed from
# a different language.
is_cross_language: bool = False
# This flag is used to let controller know which language does
# the deploymnent use.
deployment_language: Any = DeploymentLanguage.PYTHON
version: Optional[str] = None
prev_version: Optional[str] = None
class Config:
validate_assignment = True
extra = "forbid"
arbitrary_types_allowed = True
# Dynamic default for max_concurrent_queries
@validator("max_concurrent_queries", always=True)
def set_max_queries_by_mode(cls, v, values): # noqa 805
if v is None:
v = 100
else:
if v <= 0:
raise ValueError("max_concurrent_queries must be >= 0")
return v
def to_proto(self):
data = self.dict()
if data.get("user_config"):
data["user_config"] = cloudpickle.dumps(data["user_config"])
if data.get("autoscaling_config"):
data["autoscaling_config"] = AutoscalingConfigProto(
**data["autoscaling_config"]
)
return DeploymentConfigProto(**data)
def to_proto_bytes(self):
return self.to_proto().SerializeToString()
@classmethod
def from_proto(cls, proto: DeploymentConfigProto):
data = MessageToDict(
proto,
including_default_value_fields=True,
preserving_proto_field_name=True,
use_integers_for_enums=True,
)
if "user_config" in data:
if data["user_config"] != "":
data["user_config"] = cloudpickle.loads(proto.user_config)
else:
data["user_config"] = None
if "autoscaling_config" in data:
data["autoscaling_config"] = AutoscalingConfig(**data["autoscaling_config"])
if "prev_version" in data:
if data["prev_version"] == "":
data["prev_version"] = None
if "version" in data:
if data["version"] == "":
data["version"] = None
return cls(**data)
@classmethod
def from_proto_bytes(cls, proto_bytes: bytes):
proto = DeploymentConfigProto.FromString(proto_bytes)
return cls.from_proto(proto)
@classmethod
def from_default(cls, ignore_none: bool = False, **kwargs):
"""Creates a default DeploymentConfig and overrides it with kwargs.
Only accepts the same keywords as the class. Passing in any other
keyword raises a ValueError.
Args:
ignore_none (bool): When True, any valid keywords with value None
are ignored, and their values stay default. Invalid keywords
still raise a TypeError.
Raises:
TypeError: when a keyword that's not an argument to the class is
passed in.
"""
config = cls()
valid_config_options = set(config.dict().keys())
# Friendly error if a non-DeploymentConfig kwarg was passed in
for key, val in kwargs.items():
if key not in valid_config_options:
raise TypeError(
f'Got invalid Deployment config option "{key}" '
f"(with value {val}) as keyword argument. All Deployment "
"config options must come from this list: "
f"{list(valid_config_options)}."
)
if ignore_none:
kwargs = {key: val for key, val in kwargs.items() if val is not None}
for key, val in kwargs.items():
config.__setattr__(key, val)
return config
class ReplicaConfig:
def __init__(
self,
deployment_def: Union[Callable, str],
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
ray_actor_options=None,
):
# Validate that deployment_def is an import path, function, or class.
self.import_path = None
if isinstance(deployment_def, str):
self.func_or_class_name = deployment_def
self.import_path = deployment_def
elif inspect.isfunction(deployment_def):
self.func_or_class_name = deployment_def.__name__
if init_args:
raise ValueError("init_args not supported for function deployments.")
if init_kwargs:
raise ValueError("init_kwargs not supported for function deployments.")
elif inspect.isclass(deployment_def):
self.func_or_class_name = deployment_def.__name__
else:
raise TypeError(
"Deployment must be a function or class, it is {}.".format(
type(deployment_def)
)
)
self.serialized_deployment_def = cloudpickle.dumps(deployment_def)
self.init_args = init_args if init_args is not None else ()
self.init_kwargs = init_kwargs if init_kwargs is not None else {}
if ray_actor_options is None:
self.ray_actor_options = {}
else:
self.ray_actor_options = ray_actor_options
self.resource_dict = {}
self._validate()
def _validate(self):
if not isinstance(self.ray_actor_options, dict):
raise TypeError("ray_actor_options must be a dictionary.")
disallowed_ray_actor_options = {
"args",
"kwargs",
"max_concurrency",
"max_restarts",
"max_task_retries",
"name",
"namespace",
"lifetime",
"placement_group",
"placement_group_bundle_index",
"placement_group_capture_child_tasks",
"max_pending_calls",
"scheduling_strategy",
}
for option in disallowed_ray_actor_options:
if option in self.ray_actor_options:
raise ValueError(
f"Specifying {option} in ray_actor_options is not allowed."
)
# TODO(suquark): reuse options validation of remote function/actor.
# Ray defaults to zero CPUs for placement, we default to one here.
if self.ray_actor_options.get("num_cpus") is None:
self.ray_actor_options["num_cpus"] = 1
num_cpus = self.ray_actor_options["num_cpus"]
if not isinstance(num_cpus, (int, float)):
raise TypeError("num_cpus in ray_actor_options must be an int or a float.")
elif num_cpus < 0:
raise ValueError("num_cpus in ray_actor_options must be >= 0.")
self.resource_dict["CPU"] = num_cpus
if self.ray_actor_options.get("num_gpus") is None:
self.ray_actor_options["num_gpus"] = 0
num_gpus = self.ray_actor_options["num_gpus"]
if not isinstance(num_gpus, (int, float)):
raise TypeError("num_gpus in ray_actor_options must be an int or a float.")
elif num_gpus < 0:
raise ValueError("num_gpus in ray_actor_options must be >= 0.")
self.resource_dict["GPU"] = num_gpus
# Serve deployments use Ray's default for actor memory.
self.ray_actor_options.setdefault("memory", None)
memory = self.ray_actor_options["memory"]
if memory is not None and not isinstance(memory, (int, float)):
raise TypeError(
"memory in ray_actor_options must be an int, a float, or None."
)
elif memory is not None and memory <= 0:
raise ValueError("memory in ray_actor_options must be > 0.")
self.resource_dict["memory"] = memory
object_store_memory = self.ray_actor_options.get("object_store_memory")
if not isinstance(object_store_memory, (int, float, type(None))):
raise TypeError(
"object_store_memory in ray_actor_options must be an int, float "
"or None."
)
elif object_store_memory is not None and object_store_memory < 0:
raise ValueError("object_store_memory in ray_actor_options must be >= 0.")
self.resource_dict["object_store_memory"] = object_store_memory
if self.ray_actor_options.get("resources") is None:
self.ray_actor_options["resources"] = {}
custom_resources = self.ray_actor_options["resources"]
if not isinstance(custom_resources, dict):
raise TypeError("resources in ray_actor_options must be a dictionary.")
self.resource_dict.update(custom_resources)
@classmethod
def from_proto(
cls, proto: ReplicaConfigProto, deployment_language: DeploymentLanguage
):
deployment_def = None
if proto.serialized_deployment_def != b"":
if deployment_language == DeploymentLanguage.PYTHON:
deployment_def = cloudpickle.loads(proto.serialized_deployment_def)
else:
# TODO use messagepack
deployment_def = cloudpickle.loads(proto.serialized_deployment_def)
init_args = (
cloudpickle.loads(proto.init_args) if proto.init_args != b"" else None
)
init_kwargs = (
cloudpickle.loads(proto.init_kwargs) if proto.init_kwargs != b"" else None
)
ray_actor_options = (
json.loads(proto.ray_actor_options)
if proto.ray_actor_options != ""
else None
)
return ReplicaConfig(deployment_def, init_args, init_kwargs, ray_actor_options)
@classmethod
def from_proto_bytes(
cls, proto_bytes: bytes, deployment_language: DeploymentLanguage
):
proto = ReplicaConfigProto.FromString(proto_bytes)
return cls.from_proto(proto, deployment_language)
def to_proto(self):
data = {
"serialized_deployment_def": self.serialized_deployment_def,
}
if self.init_args:
data["init_args"] = cloudpickle.dumps(self.init_args)
if self.init_kwargs:
data["init_kwargs"] = cloudpickle.dumps(self.init_kwargs)
if self.ray_actor_options:
data["ray_actor_options"] = json.dumps(
self.ray_actor_options, cls=ServeEncoder
)
return ReplicaConfigProto(**data)
def to_proto_bytes(self):
return self.to_proto().SerializeToString()
class DeploymentMode(str, Enum):
NoServer = "NoServer"
HeadOnly = "HeadOnly"
EveryNode = "EveryNode"
FixedNumber = "FixedNumber"
class HTTPOptions(pydantic.BaseModel):
# Documentation inside serve.start for user's convenience.
host: Optional[str] = DEFAULT_HTTP_HOST
port: int = DEFAULT_HTTP_PORT
middlewares: List[Any] = []
location: Optional[DeploymentMode] = DeploymentMode.HeadOnly
num_cpus: int = 0
root_url: str = ""
root_path: str = ""
fixed_number_replicas: Optional[int] = None
fixed_number_selection_seed: int = 0
@validator("location", always=True)
def location_backfill_no_server(cls, v, values):
if values["host"] is None or v is None:
return DeploymentMode.NoServer
return v
@validator("fixed_number_replicas", always=True)
def fixed_number_replicas_should_exist(cls, v, values):
if values["location"] == DeploymentMode.FixedNumber and v is None:
raise ValueError(
"When location='FixedNumber', you must specify "
"the `fixed_number_replicas` parameter."
)
return v
class Config:
validate_assignment = True
extra = "forbid"
arbitrary_types_allowed = True
37.158986
88
0.645749
793f925198a3ccb544a87e0e170e3eaeeadee3cd
433
py
Python
examples/basic_shapes.py
abey79/lines
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
[
"MIT"
]
39
2019-10-23T09:19:34.000Z
2022-02-16T21:44:12.000Z
examples/basic_shapes.py
abey79/lines
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
[
"MIT"
]
2
2020-11-13T14:06:02.000Z
2021-09-29T08:18:44.000Z
examples/basic_shapes.py
abey79/lines
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
[
"MIT"
]
2
2020-11-06T22:21:00.000Z
2021-06-09T18:40:02.000Z
from lines import Cube, Cylinder, Pyramid, Scene
def main():
# Setup the scene
scene = Scene()
scene.add(Cube(translate=(2, 0, 0)))
scene.add(Pyramid())
scene.add(Cylinder(scale=(0.5, 0.5, 1), translate=(-2, 0, 0)))
scene.look_at((2, 6, 1.5), (0, 0, 0))
scene.perspective(70, 0.1, 10)
# Render and display the scene
scene.render().show(show_hidden=True)
if __name__ == "__main__":
main()
from __future__ import division
from __future__ import print_function
import math
import pandas as pd
import plotly.graph_objs as go
from plotly.offline import plot
import cea.plots.solar_technology_potentials
from cea.plots.variable_naming import LOGO, COLOR, NAMING
__author__ = "Shanshan Hsieh"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
class PvtMonthlyPlot(cea.plots.solar_technology_potentials.SolarTechnologyPotentialsPlotBase):
"""Implement the pv-electricity-potential plot"""
name = "PVT Electricity/Thermal Potential"
def __init__(self, project, parameters, cache):
super(PvtMonthlyPlot, self).__init__(project, parameters, cache)
self.input_files = [(self.locator.PVT_totals, [])] + [(self.locator.PVT_results, [building])
for building in self.buildings]
self.__data_frame = None
self.__E_analysis_fields_used = None
self.__Q_analysis_fields_used = None
@property
def data_frame(self):
"""This get's used a couple of times in the calculations, avoid hitting the PlotCache each time"""
if self.__data_frame is None:
self.__data_frame = self.PVT_hourly_aggregated_kW
return self.__data_frame
@property
def E_analysis_fields_used(self):
if self.__E_analysis_fields_used is None:
self.__E_analysis_fields_used = self.data_frame.columns[
self.data_frame.columns.str.endswith('_E_kWh')].tolist()
return self.__E_analysis_fields_used
@property
def Q_analysis_fields_used(self):
if self.__Q_analysis_fields_used is None:
self.__Q_analysis_fields_used = self.data_frame.columns[
self.data_frame.columns.str.endswith('_Q_kWh')].tolist()
return self.__Q_analysis_fields_used
@property
def layout(self):
analysis_range = calc_range(self.data_frame, self.E_analysis_fields_used, self.Q_analysis_fields_used)
return go.Layout(barmode='stack',
yaxis=dict(title='PVT Electricity/Heat production [MWh]', rangemode='tozero',
scaleanchor='y2', range=analysis_range),
yaxis2=dict(overlaying='y', anchor='x', range=analysis_range))
def calc_graph(self):
# calculate graph
graph = []
data_frame = self.data_frame
monthly_df = (data_frame.set_index("DATE").resample("M").sum() / 1000).round(2) # to MW
monthly_df["month"] = monthly_df.index.strftime("%B")
E_total = monthly_df[self.E_analysis_fields_used].sum(axis=1)
Q_total = monthly_df[self.Q_analysis_fields_used].sum(axis=1)
for field in self.Q_analysis_fields_used:
y = monthly_df[field]
total_perc = (y.divide(Q_total) * 100).round(2).values
total_perc_txt = ["(" + str(x) + " %)" for x in total_perc]
trace1 = go.Bar(x=monthly_df["month"], y=y, yaxis='y2', name=field.split('_kWh', 1)[0], text=total_perc_txt,
marker=dict(color=COLOR[field], line=dict(color="rgb(105,105,105)", width=1)),
opacity=1, width=0.3, offset=0, legendgroup=field.split('_Q_kWh', 1)[0])
graph.append(trace1)
for field in self.E_analysis_fields_used:
y = monthly_df[field]
total_perc = (y / E_total * 100).round(2).values
total_perc_txt = ["(" + str(x) + " %)" for x in total_perc]
trace2 = go.Bar(x=monthly_df["month"], y=y, name=field.split('_kWh', 1)[0], text=total_perc_txt,
marker=dict(color=COLOR[field]), width=0.3, offset=-0.35,
legendgroup=field.split('_E_kWh', 1)[0])
graph.append(trace2)
return graph
def calc_table(self):
analysis_fields_used = []
total_perc = []
data_frame = self.data_frame
E_analysis_fields_used = self.E_analysis_fields_used
Q_analysis_fields_used = self.Q_analysis_fields_used
# calculation for electricity production
E_total = (data_frame[E_analysis_fields_used].sum(axis=0) / 1000).round(2).tolist() # to MW
# calculate top three potentials
E_anchors = []
E_names = []
monthly_df = (data_frame.set_index("DATE").resample("M").sum() / 1000).round(2) # to MW
monthly_df["month"] = monthly_df.index.strftime("%B")
monthly_df.set_index("month", inplace=True)
if sum(E_total) > 0:
E_total_perc = [str(x) + " (" + str(round(x / sum(E_total) * 100, 1)) + " %)" for x in E_total]
for field in E_analysis_fields_used:
E_anchors.append(', '.join(calc_top_three_anchor_loads(monthly_df, field)))
E_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
else:
E_total_perc = ['0 (0%)'] * len(E_total)
for field in E_analysis_fields_used:
E_anchors.append('-')
E_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
analysis_fields_used.extend(E_analysis_fields_used)
total_perc.extend(E_total_perc)
# calculation for heat production
Q_total = (data_frame[Q_analysis_fields_used].sum(axis=0) / 1000).round(2).tolist() # to MW
Q_names = []
Q_anchors = []
if sum(Q_total) > 0:
Q_total_perc = [str(x) + " (" + str(round(x / sum(Q_total) * 100, 1)) + " %)" for x in Q_total]
for field in Q_analysis_fields_used:
Q_anchors.append(', '.join(calc_top_three_anchor_loads(monthly_df, field)))
Q_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
else:
Q_total_perc = ['0 (0%)'] * len(Q_total)
for field in Q_analysis_fields_used:
Q_anchors.append('-')
Q_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
analysis_fields_used.extend(Q_analysis_fields_used)
total_perc.extend(Q_total_perc)
column_names = ['Surfaces', 'Total electricity production [MWh/yr]', 'Months with the highest potentials',
'Surfaces ', 'Total heat production [MWh/yr]', 'Months with the highest potentials']
column_values = [E_names, E_total_perc, E_anchors, Q_names, Q_total_perc, Q_anchors]
table_df = pd.DataFrame({cn: cv for cn, cv in zip(column_names, column_values)}, columns=column_names)
return table_df
def pvt_district_monthly(data_frame, analysis_fields, title, output_path):
E_analysis_fields_used = data_frame.columns[data_frame.columns.isin(analysis_fields[0:5])].tolist()
Q_analysis_fields_used = data_frame.columns[data_frame.columns.isin(analysis_fields[5:10])].tolist()
range = calc_range(data_frame, E_analysis_fields_used, Q_analysis_fields_used)
# CALCULATE GRAPH
traces_graphs = calc_graph(E_analysis_fields_used, Q_analysis_fields_used, data_frame)
# CALCULATE TABLE
traces_table = calc_table(E_analysis_fields_used, Q_analysis_fields_used, data_frame)
# PLOT GRAPH
traces_graphs.append(traces_table)
layout = go.Layout(images=LOGO, title=title, barmode='stack',
yaxis=dict(title='PVT Electricity/Heat production [MWh]', domain=[0.35, 1], rangemode='tozero',
scaleanchor='y2', range=range),
yaxis2=dict(overlaying='y', anchor='x', domain=[0.35, 1], range=range))
fig = go.Figure(data=traces_graphs, layout=layout)
plot(fig, auto_open=False, filename=output_path)
return {'data': traces_graphs, 'layout': layout}
def calc_range(data_frame, E_analysis_fields_used, Q_analysis_fields_used):
monthly_df = (data_frame.set_index("DATE").resample("M").sum() / 1000).round(2) # to MW
monthly_df["month"] = monthly_df.index.strftime("%B")
E_total = monthly_df[E_analysis_fields_used].sum(axis=1)
Q_total = monthly_df[Q_analysis_fields_used].sum(axis=1)
y_axis_max = math.ceil(max(E_total.max(), Q_total.max()))
y_asix_min = min(0, min(Q_total.min(), E_total.min()))
return [y_asix_min, y_axis_max]
def calc_graph(E_analysis_fields_used, Q_analysis_fields_used, data_frame):
# calculate graph
graph = []
monthly_df = (data_frame.set_index("DATE").resample("M").sum() / 1000).round(2) # to MW
monthly_df["month"] = monthly_df.index.strftime("%B")
E_total = monthly_df[E_analysis_fields_used].sum(axis=1)
Q_total = monthly_df[Q_analysis_fields_used].sum(axis=1)
for field in Q_analysis_fields_used:
y = monthly_df[field]
total_perc = (y.divide(Q_total) * 100).round(2).values
total_perc_txt = ["(" + str(x) + " %)" for x in total_perc]
trace1 = go.Bar(x=monthly_df["month"], y=y, yaxis='y2', name=field.split('_kWh', 1)[0], text=total_perc_txt,
marker=dict(color=COLOR[field], line=dict(color="rgb(105,105,105)", width=1)),
opacity=1, width=0.3, offset=0, legendgroup=field.split('_Q_kWh', 1)[0])
graph.append(trace1)
for field in E_analysis_fields_used:
y = monthly_df[field]
total_perc = (y / E_total * 100).round(2).values
total_perc_txt = ["(" + str(x) + " %)" for x in total_perc]
trace2 = go.Bar(x=monthly_df["month"], y=y, name=field.split('_kWh', 1)[0], text=total_perc_txt,
marker=dict(color=COLOR[field]), width=0.3, offset=-0.35,
legendgroup=field.split('_E_kWh', 1)[0])
graph.append(trace2)
return graph
def calc_table(E_analysis_fields_used, Q_analysis_fields_used, data_frame):
analysis_fields_used = []
total_perc = []
# calculation for electricity production
E_total = (data_frame[E_analysis_fields_used].sum(axis=0) / 1000).round(2).tolist() # to MW
# calculate top three potentials
E_anchors = []
E_names = []
monthly_df = (data_frame.set_index("DATE").resample("M").sum() / 1000).round(2) # to MW
monthly_df["month"] = monthly_df.index.strftime("%B")
monthly_df.set_index("month", inplace=True)
if sum(E_total) > 0:
E_total_perc = [str(x) + " (" + str(round(x / sum(E_total) * 100, 1)) + " %)" for x in E_total]
for field in E_analysis_fields_used:
E_anchors.append(calc_top_three_anchor_loads(monthly_df, field))
E_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
else:
E_total_perc = ['0 (0%)'] * len(E_total)
for field in E_analysis_fields_used:
E_anchors.append('-')
E_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
analysis_fields_used.extend(E_analysis_fields_used)
total_perc.extend(E_total_perc)
# calculation for heat production
Q_total = (data_frame[Q_analysis_fields_used].sum(axis=0) / 1000).round(2).tolist() # to MW
Q_names = []
Q_anchors = []
if sum(Q_total) > 0:
Q_total_perc = [str(x) + " (" + str(round(x / sum(Q_total) * 100, 1)) + " %)" for x in Q_total]
for field in Q_analysis_fields_used:
Q_anchors.append(calc_top_three_anchor_loads(monthly_df, field))
Q_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
else:
Q_total_perc = ['0 (0%)'] * len(Q_total)
for field in Q_analysis_fields_used:
Q_anchors.append('-')
Q_names.append(NAMING[field].split(' ')[6] + ' (' + field.split('_kWh', 1)[0] + ')')
analysis_fields_used.extend(Q_analysis_fields_used)
total_perc.extend(Q_total_perc)
table = go.Table(domain=dict(x=[0, 1], y=[0.0, 0.2]),
header=dict(values=['Surfaces', 'Total electricity production [MWh/yr]',
'Months with the highest potentials', 'Surfaces',
'Total heat production [MWh/yr]', 'Months with the highest potentials']),
cells=dict(values=[E_names, E_total_perc, E_anchors, Q_names, Q_total_perc, Q_anchors]))
return table
def calc_top_three_anchor_loads(data_frame, field):
data_frame = data_frame.sort_values(by=field, ascending=False)
anchor_list = data_frame[:3].index.values
return anchor_list
def main():
"""Test this plot"""
import cea.config
import cea.inputlocator
import cea.plots.cache
config = cea.config.Configuration()
locator = cea.inputlocator.InputLocator(config.scenario)
cache = cea.plots.cache.PlotCache(config.project)
# cache = cea.plots.cache.NullPlotCache()
weather_path = locator.get_weather_file()
PvtMonthlyPlot(config.project, {'buildings': None,
'scenario-name': config.scenario_name,
'weather': weather_path},
cache).plot(auto_open=True)
PvtMonthlyPlot(config.project, {'buildings': locator.get_zone_building_names()[0:2],
'scenario-name': config.scenario_name,
'weather': weather_path},
cache).plot(auto_open=True)
PvtMonthlyPlot(config.project, {'buildings': [locator.get_zone_building_names()[0]],
'scenario-name': config.scenario_name,
'weather': weather_path},
cache).plot(auto_open=True)
if __name__ == '__main__':
main()
46.871622
120
0.62527
793f93bb640dea8d0bfa33dbd16683dab3070d89
571
py
Python
examples/nD_multiscale_image.py
mrocklin/napari
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
[
"BSD-3-Clause"
]
1
2022-03-01T19:38:06.000Z
2022-03-01T19:38:06.000Z
examples/nD_multiscale_image.py
mrocklin/napari
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
[
"BSD-3-Clause"
]
17
2020-06-11T21:02:03.000Z
2021-02-02T19:10:19.000Z
examples/nD_multiscale_image.py
mrocklin/napari
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
[
"BSD-3-Clause"
]
1
2020-07-19T18:03:35.000Z
2020-07-19T18:03:35.000Z
"""
Displays an nD multiscale image
"""
from skimage.transform import pyramid_gaussian
import napari
import numpy as np
# create multiscale from random data
base = np.random.random((1536, 1536))
base = np.array([base * (8 - i) / 8 for i in range(8)])
print('base shape', base.shape)
multiscale = list(
pyramid_gaussian(base, downscale=2, max_layer=2, multichannel=False)
)
print('multiscale level shapes: ', [p.shape for p in multiscale])
with napari.gui_qt():
# add image multiscale
napari.view_image(multiscale, contrast_limits=[0, 1], multiscale=True)
25.954545
74
0.726795
793f978012241efbd22ad38a243fb9578449b9c2
3,212
py
Python
tests/test_loadtweets.py
rmotr-group-projects/wdd-w2-twitter-commands
404ddb6e9121eab562c113610b3b54f296e6c47e
[
"MIT"
]
null
null
null
tests/test_loadtweets.py
rmotr-group-projects/wdd-w2-twitter-commands
404ddb6e9121eab562c113610b3b54f296e6c47e
[
"MIT"
]
6
2020-06-05T22:15:17.000Z
2022-03-11T23:56:03.000Z
tests/test_loadtweets.py
ine-rmotr-projects/wdd-w2-twitter-commands
404ddb6e9121eab562c113610b3b54f296e6c47e
[
"MIT"
]
6
2016-09-03T12:58:21.000Z
2016-11-15T16:52:57.000Z
from django.utils.six import StringIO
from django.core.management import call_command
from django.test import TestCase
from django.core.management.base import CommandError
from twitter.models import Tweet, User
class LoadTweetsTestCase(TestCase):
def setUp(self):
super(LoadTweetsTestCase, self).setUp()
self.user = User.objects.create_user(
username='rmotr_com', password='password123')
self.out = StringIO()
def test_load_tweets_command(self):
"""Should import tweets from twitter API when given username is valid"""
self.assertEqual(Tweet.objects.count(), 0)
args = [self.user.username]
call_command('loadtweets', stdout=self.out, *args)
self.assertEqual(Tweet.objects.count(), 10)
self.assertTrue(
'Finished. 10 tweets have been imported.' in self.out.getvalue())
for tweet in Tweet.objects.all():
self.assertEqual(tweet.user, self.user)
def test_load_tweets_command_count(self):
"""Should import the amount of tweets specified in the --count argument"""
self.assertEqual(Tweet.objects.count(), 0)
args = [self.user.username, "--count=20"]
call_command('loadtweets', stdout=self.out, *args)
self.assertEqual(Tweet.objects.count(), 20)
self.assertTrue(
'Finished. 20 tweets have been imported.' in self.out.getvalue())
for tweet in Tweet.objects.all():
self.assertEqual(tweet.user, self.user)
def test_load_tweets_command_username_not_found(self):
"""Should raise CommandError when given username does not exist"""
self.assertEqual(Tweet.objects.count(), 0)
args = ["INVALID"]
with self.assertRaises(CommandError) as e:
call_command('loadtweets', stdout=self.out, *args)
self.assertEqual(e.exception.args[0], 'User "INVALID" does not exist')
self.assertEqual(Tweet.objects.count(), 0)
def test_load_tweets_command_invalid_username(self):
"""Should raise TypeError when given username is not a string"""
self.assertEqual(Tweet.objects.count(), 0)
args = [123]
with self.assertRaises(TypeError) as e:
call_command('loadtweets', stdout=self.out, *args)
self.assertEqual(e.exception.args[0], "'int' object is not subscriptable")
self.assertEqual(Tweet.objects.count(), 0)
def test_load_tweets_command_repeated_tweets(self):
"""Should not load tweets that already exists in the DB"""
self.assertEqual(Tweet.objects.count(), 0)
args = [self.user.username, "--count=20"]
call_command('loadtweets', stdout=self.out, *args)
self.assertTrue(
'Finished. 20 tweets have been imported.' in self.out.getvalue())
self.assertEqual(Tweet.objects.count(), 20)
for tweet in Tweet.objects.all():
self.assertEqual(tweet.user, self.user)
args = [self.user.username, "--count=50"]
call_command('loadtweets', stdout=self.out, *args)
self.assertTrue(
'Finished. 30 tweets have been imported.' in self.out.getvalue())
self.assertEqual(Tweet.objects.count(), 50)
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.inline_response_200_42_utilisation_totals import InlineResponse20042UtilisationTotals
class TestInlineResponse20042UtilisationTotals(unittest.TestCase):
""" InlineResponse20042UtilisationTotals unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20042UtilisationTotals(self):
"""
Test InlineResponse20042UtilisationTotals
"""
model = swagger_client.models.inline_response_200_42_utilisation_totals.InlineResponse20042UtilisationTotals()
if __name__ == '__main__':
unittest.main()
#!/bin/env python
from __future__ import print_function
import os;
import re;
import sys;
java = False
cpp = False
outstandingRequests = {}
currentFids = []
def findNextEvent(input):
event = re.compile("^event: (.*)$")
line = input.readline()
while (line):
if event.match(line):
return line.split()
line = input.readline()
# we use 'requestMsg domainType=RSSL_DMT_MARKET_PRICE' messages because they contain all the FIDs
# being requested. The snapshot refresh might not contain all FIDs
#
# seems that in java if the reissue doesn't cause any changed in FIDs no request is sent.
# Use UPDATE or next event: to detect this
def findNextRequest(input):
if java:
beginMsg = re.compile("^<REQUEST domainType=\"MARKET_PRICE\" .*")
endMsg = re.compile("^</REQUEST>$")
else:
beginMsg = re.compile("^<requestMsg domainType=\"RSSL_DMT_MARKET_PRICE\" .*")
endMsg = re.compile("^</requestMsg>$")
eventMsg = re.compile("^event: .*")
streamPos = input.tell()
line = input.readline()
while (line):
if eventMsg.match(line):
input.seek(streamPos)
return None
if beginMsg.match(line):
msg = line
line = input.readline()
while(line):
msg += line
if endMsg.match(line):
return msg
line = input.readline()
streamPos = input.tell()
line = input.readline()
return None
def extractFidsFromRequestMsg(msg):
viewMatcher = re.compile("^.*:ViewType.*:ViewData.*(<array .*</array>).*$", re.DOTALL)
view = viewMatcher.match(msg)
if view:
arrayMatcher = re.compile("arrayEntry data=\"([A-Z0-9_]+)\"/>")
return arrayMatcher.findall(view.group(1))
else:
return []
def verify(input, event, previousFids, currentFids):
nextRequest = findNextRequest(input)
if nextRequest == None:
# handles the case of reissues that do not change the view; seems to occur on java
if event[1] == "requesting" and event[4] == "reissue":
return
print("did not find a request for event ", event)
sys.exit(1)
else:
extractedFids = extractFidsFromRequestMsg(nextRequest)
if extractedFids == currentFids:
return
elif not extractedFids: # if the fids did not change, the request has no fids (containerType="RSSL_DT_NO_DATA")
if previousFids == currentFids:
return
else:
print("request message had no fids but previous fids (", previousFids, ") did not match new fids (", currentFids, ")")
sys.exit(1)
else:
print("failed to matched fids: expected", currentFids, "; got ", extractedFids)
sys.exit(1)
print("version:", sys.version)
# was our test program Java or Cpp-C
with open("out", "r") as input:
cppLoginRequestMsg = re.compile("^<requestMsg domainType=\"RSSL_DMT_LOGIN\" .*")
javaLoginRequestMsg = re.compile("^<REQUEST domainType=\"LOGIN\".*")
line = input.readline()
while (line):
if cppLoginRequestMsg.match(line):
cpp = True
break
if javaLoginRequestMsg.match(line):
java = True
break
line = input.readline()
if cpp == False and java == False:
print("did not find login request msg")
sys.exit(1)
if cpp == True:
print("CPP input")
if java == True:
print("JAVA input")
with open("out", "rU") as input:
while True:
print()
event = findNextEvent(input)
if event:
print(event)
# handle request
if event[1] == "requesting" and event[4] == "request":
sortedUniqueFids = sorted(set(event[6:]))
previousFids = currentFids
currentFids = sorted(set(currentFids + sortedUniqueFids))
if java:
verify(input, event, previousFids, currentFids)
print("fids matched for request", event[5][:-1])
# next event is the handle
handleEvent = findNextEvent(input)
if handleEvent:
if handleEvent[1] == "handle":
handle = handleEvent[2]
print("handle for request", event[5][:-1], "was", handle)
else:
print("expected to find handle event after request event; found this event [", handleEvent, "event instead")
sys.exit(1)
else:
print("expected to find handle event after request event; did not find any event")
sys.exit(1)
outstandingRequests[handle] = sortedUniqueFids
if cpp:
verify(input, event, previousFids, currentFids)
print("fids matched for request", event[5][:-1])
# reissue
if event[1] == "requesting" and event[4] == "reissue":
sortedUniqueFids = sorted(set(event[6:]))
handleEvent = findNextEvent(input)
if handleEvent:
if handleEvent[1] == "reissue" and handleEvent[3] == "handle":
previousFids = currentFids
handle = handleEvent[4]
print("reissue for handle", handle)
outstandingRequests[handle] = sortedUniqueFids;
# recreate currentFids
currentFids=[]
for h, fids in outstandingRequests.items():
currentFids = sorted(set(currentFids + fids))
verify(input, event, previousFids, currentFids)
print("fids matched for reissue", event[5][:-1], "( handle", handle, ")")
else:
print("expected to find handle event after reissue event; found this event [", handleEvent, "event instead")
sys.exit(1)
else:
print("expected to find handle event after reissue event; did not find any event")
sys.exit(1)
# removing handle
if event[1] == "removing":
handleBeingRemoved = event[-1]
del outstandingRequests[handleBeingRemoved]
# no requests left so closeMsg is expected
if not outstandingRequests:
if java:
closeMsg = re.compile("^<CLOSE domainType=\"MARKET_PRICE\".*$")
else:
closeMsg = re.compile("^<closeMsg domainType=\"RSSL_DMT_MARKET_PRICE\".*$")
line = input.readline()
while line:
if closeMsg.match(line):
print("found expected closeMsg after removing handle", event[-1])
sys.exit(0)
line = input.readline()
print("expected to find closeMsg after removing handle", event[-1])
sys.exit(1)
# recreate currentFids
previousFids = currentFids;
currentFids=[]
for handle, fids in outstandingRequests.items():
currentFids = sorted(set(currentFids + fids))
verify(input, event, previousFids, currentFids)
print("fids matched after removing handle", event[-1])
else:
for h, f in outstandingRequests.iteritems():
print("handle", h, "has fids", f)
sys.exit(0)
37.908213
134
0.535109
793fa00a4a9429e8f7e5713430ec03e1f4849f97
3,791
py
Python
closed/HPE/configs/resnet50/Server/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[
"Apache-2.0"
]
12
2021-09-23T08:05:57.000Z
2022-03-21T03:52:11.000Z
closed/HPE/configs/resnet50/Server/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[
"Apache-2.0"
]
11
2021-09-23T20:34:06.000Z
2022-01-22T07:58:02.000Z
closed/HPE/configs/resnet50/Server/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[
"Apache-2.0"
]
16
2021-09-23T20:26:38.000Z
2022-03-09T12:59:56.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
from code.common.constants import Benchmark, Scenario
from code.common.system_list import System, Architecture, MIGConfiguration, MIGSlice
from configs.configuration import *
@ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP)
class A100_SXM_80GBx4(BenchmarkConfiguration):
system = System("A100-SXM-80GB", Architecture.Ampere, 4)
active_sms = 100
input_dtype = "int8"
input_format = "linear"
map_path = "data_maps/imagenet/val_map.txt"
precision = "int8"
tensor_path = "${PREPROCESSED_DATA_DIR}/imagenet/ResNet50/int8_linear"
use_deque_limit = True
deque_timeout_usec = 4000
gpu_batch_size = 128
gpu_copy_streams = 4
gpu_inference_streams = 2
server_target_qps = 130000
use_cuda_thread_per_device = True
use_graphs = True
scenario = Scenario.Server
benchmark = Benchmark.ResNet50
@ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP)
class A100_SXM_80GBx4_Triton(BenchmarkConfiguration):
system = System("A100-SXM-80GB", Architecture.Ampere, 4)
active_sms = 100
input_dtype = "int8"
input_format = "linear"
map_path = "data_maps/imagenet/val_map.txt"
precision = "int8"
tensor_path = "${PREPROCESSED_DATA_DIR}/imagenet/ResNet50/int8_linear"
use_deque_limit = True
deque_timeout_usec = 4000
gpu_batch_size = 128
gpu_copy_streams = 1
gpu_inference_streams = 2
server_target_qps = 95000
use_cuda_thread_per_device = True
use_graphs = False
scenario = Scenario.Server
benchmark = Benchmark.ResNet50
use_triton = True
@ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP)
class A100_SXM_80GBx8(BenchmarkConfiguration):
system = System("A100-SXM-80GB", Architecture.Ampere, 8)
active_sms = 100
input_dtype = "int8"
input_format = "linear"
map_path = "data_maps/imagenet/val_map.txt"
precision = "int8"
tensor_path = "${PREPROCESSED_DATA_DIR}/imagenet/ResNet50/int8_linear"
use_deque_limit = True
deque_timeout_usec = 4000
gpu_batch_size = 128
gpu_copy_streams = 4
gpu_inference_streams = 2
server_target_qps = 260000
start_from_device = False
use_cuda_thread_per_device = True
use_graphs = True
scenario = Scenario.Server
benchmark = Benchmark.ResNet50
@ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP)
class A100_SXM_80GBx8_Triton(BenchmarkConfiguration):
system = System("A100-SXM-80GB", Architecture.Ampere, 8)
active_sms = 100
input_dtype = "int8"
input_format = "linear"
map_path = "data_maps/imagenet/val_map.txt"
precision = "int8"
tensor_path = "${PREPROCESSED_DATA_DIR}/imagenet/ResNet50/int8_linear"
use_deque_limit = True
deque_timeout_usec = 4000
gpu_batch_size = 128
gpu_copy_streams = 1
gpu_inference_streams = 2
server_target_qps = 190000
start_from_device = False
use_cuda_thread_per_device = True
use_graphs = False
scenario = Scenario.Server
benchmark = Benchmark.ResNet50
use_triton = True
34.463636
84
0.744658
793fa0120085b30a3f3bd0279b33c4f15bbf5ccc
5,285
py
Python
docs/source/conf.py
oduwsdl/MementoEmbed
20e035a310527d5b4f2b4987714c5ec8ff9df2e4
[
"MIT"
]
11
2018-06-27T07:00:20.000Z
2021-07-14T06:51:46.000Z
docs/source/conf.py
oduwsdl/MementoEmbed
20e035a310527d5b4f2b4987714c5ec8ff9df2e4
[
"MIT"
]
131
2018-06-07T22:42:20.000Z
2021-11-15T01:08:53.000Z
docs/source/conf.py
oduwsdl/MementoEmbed
20e035a310527d5b4f2b4987714c5ec8ff9df2e4
[
"MIT"
]
2
2019-06-06T07:50:54.000Z
2019-10-29T10:20:04.000Z
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'MementoEmbed'
copyright = u': Licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (http://creativecommons.org/licenses/by-sa/4.0/) by the Old Dominion University Web Science and Digital Libraries Research Group.'
author = 'Shawn M. Jones'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.2021.03.24.211511'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ntemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MementoEmbeddoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MementoEmbed.tex', 'MementoEmbed Documentation',
'Shawn M. Jones', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mementoembed', 'MementoEmbed Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MementoEmbed', 'MementoEmbed Documentation',
author, 'MementoEmbed', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_stylesheet("http://robustlinks.mementoweb.org/tools/js/robustlinks.css")
app.add_javascript("http://robustlinks.mementoweb.org/tools/js/robustlinks-min.js")
32.030303
229
0.660927
793fa01c269e91a2a23ccb7951573654714777f4
14,190
py
Python
lib/dataset/voc.py
transcendentsky/ssd_pytorch
f1e20318d37b11f99c207d5e19f49ec662bf80f9
[
"MIT"
]
10
2018-06-26T04:08:12.000Z
2021-08-02T03:57:56.000Z
lib/dataset/voc.py
transcendentsky/ssd_pytorch
f1e20318d37b11f99c207d5e19f49ec662bf80f9
[
"MIT"
]
1
2019-02-25T08:55:25.000Z
2019-02-25T08:55:25.000Z
lib/dataset/voc.py
transcendentsky/ssd_pytorch
f1e20318d37b11f99c207d5e19f49ec662bf80f9
[
"MIT"
]
8
2018-07-29T02:08:18.000Z
2021-08-02T03:57:55.000Z
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
from .voc_eval import voc_eval
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( '__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
# for making bounding boxes pretty
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
class VOCSegmentation(data.Dataset):
"""VOC Segmentation Dataset Object
input and target are both images
NOTE: need to address https://github.com/pytorch/vision/issues/9
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg: 'train', 'val', 'test').
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target image
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, image_set, transform=None, target_transform=None,
dataset_name='VOC2007'):
self.root = root
self.image_set = image_set
self.transform = transform
self.target_transform = target_transform
self._annopath = os.path.join(
self.root, dataset_name, 'SegmentationClass', '%s.png')
self._imgpath = os.path.join(
self.root, dataset_name, 'JPEGImages', '%s.jpg')
self._imgsetpath = os.path.join(
self.root, dataset_name, 'ImageSets', 'Segmentation', '%s.txt')
with open(self._imgsetpath % self.image_set) as f:
self.ids = f.readlines()
self.ids = [x.strip('\n') for x in self.ids]
def __getitem__(self, index):
img_id = self.ids[index]
target = Image.open(self._annopath % img_id).convert('RGB')
img = Image.open(self._imgpath % img_id).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.ids)
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0,5))
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
#cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res,bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class VOCDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, image_sets, preproc=None, target_transform=AnnotationTransform(),
dataset_name='VOC0712'):
self.root = root
self.image_set = image_sets
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
self.ids = list()
for (year, name) in image_sets:
self._year = year
rootpath = os.path.join(self.root, 'VOC' + year)
for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
def __getitem__(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
#print(img.size())
# target = self.target_transform(target, width, height)
# print(target.shape)
assert img is not None, "Img Error"
return img, target
def __len__(self):
return len(self.ids)
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
# gt = self.target_transform(anno, 1, 1)
# gt = self.target_transform(anno)
# return img_id[1], gt
if self.target_transform is not None:
anno = self.target_transform(anno)
return anno
def pull_img_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno)
height, width, _ = img.shape
boxes = gt[:,:-1]
labels = gt[:,-1]
boxes[:, 0::2] /= width
boxes[:, 1::2] /= height
labels = np.expand_dims(labels,1)
targets = np.hstack((boxes,labels))
return img, targets
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
to_tensor = transforms.ToTensor()
return torch.Tensor(self.pull_image(index)).unsqueeze_(0).cpu()
# trans Fixed randperm Problem
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
self._write_voc_results_file(all_boxes)
aps,map = self._do_python_eval(output_dir)
return aps,map
def _get_voc_results_file_template(self):
filename = 'comp4_det_test' + '_{:s}.txt'
filedir = os.path.join(
self.root, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(VOC_CLASSES):
cls_ind = cls_ind
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.ids):
index = index[1]
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
rootpath = os.path.join(self.root, 'VOC' + self._year)
name = self.image_set[0][1]
annopath = os.path.join(
rootpath,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
rootpath,
'ImageSets',
'Main',
name+'.txt')
cachedir = os.path.join(self.root, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(VOC_CLASSES):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
if output_dir is not None:
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
return aps,np.mean(aps)
def show(self, index):
img, target = self.__getitem__(index)
for obj in target:
obj = obj.astype(np.int)
cv2.rectangle(img, (obj[0], obj[1]), (obj[2], obj[3]), (255,0,0), 3)
cv2.imwrite('./image.jpg', img)
## test
# if __name__ == '__main__':
# ds = VOCDetection('../../../../../dataset/VOCdevkit/', [('2012', 'train')],
# None, AnnotationTransform())
# print(len(ds))
# img, target = ds[0]
# print(target)
# ds.show(1)
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = 'www.zakariafadli.com'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
import warnings
from .CallbackFlag import CallbackFlag
from .exceptions_warnings import OverwriteWarning
class CallbackMixin:
def __init__(self):
"""
Callback mix-in for ALNS. This allows for some flexibility by having
ALNS call custom functions whenever a special event happens.
"""
self._callbacks = {}
def on_best(self, func):
"""
Sets a callback function to be called when ALNS finds a new global best
solution state.
Parameters
----------
func : callable
A function that should take a solution State as its first parameter,
and a numpy RandomState as its second (cf. the operator signature).
It should return a (new) solution State.
Warns
-----
OverwriteWarning
When a callback has already been set for the ON_BEST flag.
"""
self._set_callback(CallbackFlag.ON_BEST, func)
def has_callback(self, flag):
"""
Determines if a callable has been set for the passed-in flag.
Parameters
----------
flag : CallbackFlag
Returns
-------
bool
True if a callable is set, False otherwise.
"""
return flag in self._callbacks
def callback(self, flag):
"""
Returns the callback for the passed-in flag, assuming it exists.
Parameters
----------
flag : CallbackFlag
The callback flag for which to retrieve a callback.
Returns
-------
callable
Callback for the passed-in flag.
"""
return self._callbacks[flag]
def _set_callback(self, flag, func):
"""
Sets the passed-in callback func for the passed-in flag. Warns if this
would overwrite an existing callback.
"""
if self.has_callback(flag):
warnings.warn("A callback function has already been set for the"
" `{0}' flag. This callback will now be replaced by"
" the newly passed-in callback.".format(flag),
OverwriteWarning)
self._callbacks[flag] = func
28.641026
80
0.57162
793fa4e4b8dd99ce7350b3ecc407e691a8708726
8,765
py
Python
lambda_functions/batcher/main.py
twaldear/binaryalert
e23a904d3a1a620ae23dd687ff0e5af2d8e230c7
[
"Apache-2.0"
]
null
null
null
lambda_functions/batcher/main.py
twaldear/binaryalert
e23a904d3a1a620ae23dd687ff0e5af2d8e230c7
[
"Apache-2.0"
]
null
null
null
lambda_functions/batcher/main.py
twaldear/binaryalert
e23a904d3a1a620ae23dd687ff0e5af2d8e230c7
[
"Apache-2.0"
]
null
null
null
"""Batching Lambda function - puts all S3 objects into SQS to be re-analyzed."""
# Expects the following environment variables:
# BATCH_LAMBDA_NAME: The name of this Lambda function.
# BATCH_LAMBDA_QUALIFIER: The qualifier (alias) which is used to invoke this function.
# OBJECT_PREFIX: (Optional) Limit batching to keys which begin with the specified prefix.
# OBJECTS_PER_MESSAGE: The number of S3 objects to pack into a single SQS message.
# S3_BUCKET_NAME: Name of the S3 bucket to enumerate.
# SQS_QUEUE_URL: URL of the SQS queue which will buffer all of the S3 objects for analysis.
import json
import logging
import os
from typing import Any, Dict, List, Optional
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
CLOUDWATCH = boto3.client('cloudwatch')
LAMBDA = boto3.client('lambda')
S3 = boto3.client('s3')
SQS = boto3.resource('sqs')
SQS_MAX_MESSAGES_PER_BATCH = 10
class SQSMessage(object):
"""Encapsulates a single SQS message (which will contain multiple S3 keys)."""
def __init__(self, msg_id: int) -> None:
"""Create a new message structure, which will store a list of S3 keys.
Args:
msg_id: Message index in the global list.
"""
self._id = msg_id
self._keys: List[str] = []
@property
def num_keys(self) -> int:
"""Returns the number of keys stored in the SQS message so far."""
return len(self._keys)
def add_key(self, key: str) -> None:
"""Add another S3 key to the message."""
self._keys.append(key)
def sqs_entry(self) -> Dict[str, str]:
"""Returns a message entry in the format expected by sqs_client.send_message_batch().
Moreover, the message body matches the structure of an S3 added event. This gives all
messages in the queue the same format and enables the dispatcher to parse them consistently.
"""
return {
'Id': str(self._id),
'MessageBody': json.dumps({
'Records': [
{
's3': {
'bucket': {'name': os.environ['S3_BUCKET_NAME']},
'object': {'key': key}
}
}
for key in self._keys
]
})
}
def reset(self) -> None:
"""Remove the stored list of S3 keys."""
self._keys = []
class SQSBatcher(object):
"""Collect groups of S3 keys and batch them into as few SQS requests as possible."""
def __init__(self, queue_url: str, objects_per_message: int) -> None:
"""Create a new SQS batcher.
Args:
queue_url: Destination SQS queue URL.
objects_per_message: The maximum number of S3 keys to put in each SQS message.
Note that the downstream analyzer Lambdas will each process at most
(objects_per_message * messages_per_batch) binaries. The analyzer runtime limit is the
ultimate constraint on the size of each batch.
"""
self._queue = SQS.Queue(queue_url)
self._objects_per_message = objects_per_message
self._messages = [SQSMessage(i) for i in range(SQS_MAX_MESSAGES_PER_BATCH)]
self._msg_index = 0 # The index of the SQS message where keys are currently being added.
# The first and last keys added to this batch.
self._first_key: Optional[str] = None
self._last_key: Optional[str] = None
def _send_batch(self) -> None:
"""Group keys into messages and make a single batch request."""
LOGGER.info('Sending SQS batch of %d keys: %s ... %s',
sum(msg.num_keys for msg in self._messages), self._first_key, self._last_key)
response = self._queue.send_messages(
Entries=[msg.sqs_entry() for msg in self._messages if msg.num_keys > 0]
)
failures = response.get('Failed', [])
if failures:
# TODO: If failure['SenderFault'] == False, we could retry the failed messages
for failure in failures:
LOGGER.error('Unable to enqueue SQS message: %s', failure)
CLOUDWATCH.put_metric_data(Namespace='BinaryAlert', MetricData=[{
'MetricName': 'BatchEnqueueFailures',
'Value': len(failures),
'Unit': 'Count'
}])
for msg in self._messages:
msg.reset()
self._first_key = None
def add_key(self, key: str) -> None:
"""Add a new S3 key to the message batch and send to SQS if necessary."""
if not self._first_key:
self._first_key = key
self._last_key = key
msg = self._messages[self._msg_index]
msg.add_key(key)
# If the current message is full, move to the next one.
if msg.num_keys == self._objects_per_message:
self._msg_index += 1
# If all of the messages are full, fire off to SQS.
if self._msg_index == SQS_MAX_MESSAGES_PER_BATCH:
self._send_batch()
self._msg_index = 0
def finalize(self) -> None:
"""After all messages have been added, send the remaining as a last batch to SQS."""
if self._first_key:
LOGGER.info('Finalize: sending last batch of keys')
self._send_batch()
class S3BucketEnumerator(object):
"""Enumerates all of the S3 objects in a given bucket."""
def __init__(self, bucket_name: str, prefix: Optional[str],
continuation_token: Optional[str] = None) -> None:
"""Instantiate with an optional continuation token.
Args:
bucket_name: Name of the S3 bucket to enumerate.
prefix: Limit the enumeration to keys which begin with the specified prefix.
continuation_token: Continuation token returned from S3 list objects.
"""
# Construct the list_objects keyword arguments.
self.kwargs = {'Bucket': bucket_name}
if prefix:
LOGGER.info('Restricting batch operation to prefix: %s', prefix)
self.kwargs['Prefix'] = prefix
if continuation_token:
self.kwargs['ContinuationToken'] = continuation_token
self.finished = False # Have we finished enumerating all of the S3 bucket?
@property
def continuation_token(self) -> str:
return self.kwargs.get('ContinuationToken')
def next_page(self) -> List[str]:
"""Get the next page of S3 objects and sets self.finished = True if this is the last page.
Returns:
List of S3 object keys.
"""
response = S3.list_objects_v2(**self.kwargs)
if 'Contents' not in response:
LOGGER.info('The S3 bucket is empty; nothing to do')
self.finished = True
return []
self.kwargs['ContinuationToken'] = response.get('NextContinuationToken')
if not response['IsTruncated']:
self.finished = True
return [obj['Key'] for obj in response['Contents']]
def batch_lambda_handler(event: Dict[str, str], lambda_context: Any) -> int:
"""Entry point for the batch Lambda function.
Args:
event: Invocation event. If 'S3ContinuationToken' is one of the keys, the S3 bucket
will be enumerated beginning with that continuation token.
lambda_context: LambdaContext object with .get_remaining_time_in_millis().
Returns:
The number of enumerated S3 keys.
"""
LOGGER.info('Invoked with event %s', event)
s3_enumerator = S3BucketEnumerator(
os.environ['S3_BUCKET_NAME'],
os.environ.get('OBJECT_PREFIX'),
event.get('S3ContinuationToken')
)
sqs_batcher = SQSBatcher(os.environ['SQS_QUEUE_URL'], int(os.environ['OBJECTS_PER_MESSAGE']))
# As long as there are at least 10 seconds remaining, enumerate S3 objects into SQS.
num_keys = 0
while lambda_context.get_remaining_time_in_millis() > 10000 and not s3_enumerator.finished:
keys = s3_enumerator.next_page()
num_keys += len(keys)
for key in keys:
sqs_batcher.add_key(key)
# Send the last batch of keys.
sqs_batcher.finalize()
# If the enumerator has not yet finished but we're low on time, invoke this function again.
if not s3_enumerator.finished:
LOGGER.info('Invoking another batcher')
LAMBDA.invoke(
FunctionName=os.environ['BATCH_LAMBDA_NAME'],
InvocationType='Event', # Asynchronous invocation.
Payload=json.dumps({'S3ContinuationToken': s3_enumerator.continuation_token}),
Qualifier=os.environ['BATCH_LAMBDA_QUALIFIER']
)
return num_keys
37.780172
100
0.627952
793fa4f40a099a4a8047a90edf790c0f1d399274
896
py
Python
hyperplan/get_input.py
hyperplan-io/cli
dc7d407701fd78d9065d60c35b0f2674b28c86bb
[
"MIT"
]
1
2019-09-04T02:33:34.000Z
2019-09-04T02:33:34.000Z
hyperplan/get_input.py
hyperplan-io/cli
dc7d407701fd78d9065d60c35b0f2674b28c86bb
[
"MIT"
]
1
2019-09-16T06:09:42.000Z
2019-09-16T06:09:42.000Z
hyperplan/get_input.py
hyperplan-io/cli
dc7d407701fd78d9065d60c35b0f2674b28c86bb
[
"MIT"
]
null
null
null
def get_alphanumerical_id():
data = input('name(alphanumerical): ')
if data.isalnum():
return data
else:
print('name should be an alphanumerical string')
return get_alphanumerical_id()
def get_feature_type():
feature_type = input('type(string, float, int): ')
if feature_type != 'string' and feature_type != 'float' and feature_type != 'int':
print("feature type should be one of: 'string', 'float', 'int'")
return get_feature_type()
else:
return feature_type
def get_feature_dimension():
feature_dimension = input("dimension('scalar', 'array', 'matrix'): ")
if feature_dimension != 'scalar' and feature_dimension != 'array' and feature_dimension != 'matrix':
print("dimension should be one of 'scalar', 'array', 'matrix'")
return get_feature_dimension()
else:
return feature_dimension
37.333333
104
0.657366
793fa508023c73b4d390cf748996775178f51a5f
4,452
py
Python
Cogs/Printer.py
cheesycod/CorpBot.py
61af17bac6ff00c5eeaedc97931b62c5d3f02fcf
[
"MIT"
]
368
2016-10-17T21:21:12.000Z
2022-03-18T09:22:56.000Z
Cogs/Printer.py
cheesycod/CorpBot.py
61af17bac6ff00c5eeaedc97931b62c5d3f02fcf
[
"MIT"
]
60
2017-01-01T01:35:10.000Z
2022-01-19T18:43:00.000Z
Cogs/Printer.py
cheesycod/CorpBot.py
61af17bac6ff00c5eeaedc97931b62c5d3f02fcf
[
"MIT"
]
189
2016-10-10T20:38:11.000Z
2022-03-26T12:23:49.000Z
import asyncio
import discord
import time
import os
import random
import math
import numpy as np
from PIL import Image
from discord.ext import commands
from Cogs import GetImage
from Cogs import DisplayName
from Cogs import Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Printer(bot, settings))
class Printer(commands.Cog):
# Init with the bot reference
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def canDisplay(self, server):
# Check if we can display images
lastTime = int(self.settings.getServerStat(server, "LastPicture"))
threshold = int(self.settings.getServerStat(server, "PictureThreshold"))
if not GetImage.canDisplay( lastTime, threshold ):
# await self.bot.send_message(channel, 'Too many images at once - please wait a few seconds.')
return False
# If we made it here - set the LastPicture method
self.settings.setServerStat(server, "LastPicture", int(time.time()))
return True
def _ascii(self, image):
try:
chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
f, WCF, GCF = image, 7/4, .6
img = Image.open(image)
# Make sure we have frame 1
img = img.convert('RGBA')
# Let's scale down
w, h = 0, 0
adjust = 2
w = img.size[0]*adjust
h = img.size[1]
# Make sure we're under max params of 50h, 50w
ratio = 1
max_wide = 80
if h*2 > w:
if h > max_wide/adjust:
ratio = max_wide/adjust/h
else:
if w > max_wide:
ratio = max_wide/w
h = ratio * h
w = ratio * w
# Shrink to an area of 1900 or so (allows for extra chars)
target = 1900
if w*h > target:
r = h/w
w1 = math.sqrt(target/r)
h1 = target/w1
w = w1
h = h1
S = ( round(w), round(h) )
img = np.sum( np.asarray( img.resize(S) ), axis=2)
img -= img.min()
img = (1.0 - img/img.max())**GCF*(chars.size-1)
a = "\n".join( ("".join(r) for r in chars[len(chars)-img.astype(int)-1]))
a = "```\n" + a + "```"
return a
except Exception:
pass
return False
@commands.command(pass_context=True)
async def printavi(self, ctx, *, member = None):
"""Returns a url to the passed member's avatar."""
if member == None:
# Assume author
member = ctx.author
if type(member) is str:
new_mem = DisplayName.memberForName(member, ctx.guild)
if not new_mem:
await ctx.send("I couldn't find that member...")
return
member = new_mem
url = member.avatar_url
if not len(url):
url = member.default_avatar_url
name = DisplayName.name(member)
if name[-1].lower() == "s":
name += "' Avatar"
else:
name += "'s Avatar"
await Message.Embed(title=name, image=url, color=ctx.author).send(ctx)
@commands.command(pass_context=True)
async def print(self, ctx, *, url = None):
"""DOT MATRIX. Accepts a url - or picks the first attachment."""
if not self.canDisplay(ctx.guild):
return
if url == None and len(ctx.message.attachments) == 0:
await ctx.send("Usage: `{}print [url or attachment]`".format(ctx.prefix))
return
if url == None:
url = ctx.message.attachments[0].url
# Let's check if the "url" is actually a user
test_user = DisplayName.memberForName(url, ctx.guild)
if test_user:
# Got a user!
url = test_user.avatar_url
if not len(url):
url = test_user.default_avatar_url
message = await ctx.send("Downloading...")
path = await GetImage.download(url)
if not path:
await message.edit(content="I guess I couldn't print that one... Make sure you're passing a valid url or attachment.")
return
# Prant that shaz
final = self._ascii(path)
if os.path.exists(path):
GetImage.remove(path)
if not final:
await message.edit(content="I couldn't print that image... Make sure you're pointing me to a valid image file.")
return
if len(final) > 2000:
# Too many bigs
await message.edit(content="Whoops! I ran out of ink - maybe try a different image.")
return
print_sounds = [ "ZZzzzzzt", "Bzzt", "Vvvvrrrr", "Chhhaakkakaka", "Errrttt", "Kkkkkkkktttt", "Eeehhhnnkkk" ]
msg = "Printing..."
await message.edit(content=msg)
for i in range(5):
await asyncio.sleep(1)
msg += " " + random.choice(print_sounds) + "..."
await message.edit(content=msg)
await asyncio.sleep(1)
await message.edit(content=final)
28
122
0.6615
793fa74fde0df2c8c0c6332e338975513d628850
159
py
Python
python/interpret-core/interpret/version.py
Liyuhang97/interpret
b1130589afd2550f914c5a68bd52def6bcb75b89
[
"MIT"
]
1
2019-10-24T21:05:02.000Z
2019-10-24T21:05:02.000Z
python/interpret-core/interpret/version.py
LTHODAVDOPL/interpret
71c8d4f7a537ca7ed98f8bc4fdc2899e93405094
[
"MIT"
]
null
null
null
python/interpret-core/interpret/version.py
LTHODAVDOPL/interpret
71c8d4f7a537ca7ed98f8bc4fdc2899e93405094
[
"MIT"
]
null
null
null
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# NOTE: Version is replaced by a regex script.
__version__ = "0.1.18"
26.5
46
0.761006
793fa79633cee44221beb357c5513ef66bd24dfb
680
py
Python
curso_em_video-en/exercices/ex062_pa_parada.py
brunocampos01/becoming-a-expert-python
32b12ca80ad25cc25831f383e83eb199bfe0ad7e
[
"MIT"
]
56
2019-05-18T20:04:40.000Z
2021-12-12T02:28:24.000Z
curso_em_video-en/exercices/ex062_pa_parada.py
brunocampos01/becoming-a-expert-python
32b12ca80ad25cc25831f383e83eb199bfe0ad7e
[
"MIT"
]
18
2019-11-11T11:00:51.000Z
2021-12-30T01:52:41.000Z
curso_em_video-en/exercices/ex062_pa_parada.py
brunocampos01/becoming-a-expert-python
32b12ca80ad25cc25831f383e83eb199bfe0ad7e
[
"MIT"
]
10
2019-11-10T21:22:55.000Z
2021-12-30T11:40:17.000Z
"""
Exercice Python 062:
Melhore o DESAFIO 061,
perguntando para o usuário se ele quer mostrar mais alguns termos.
- O programa encerrará quando ele disser que quer mostrar 0 termos.
"""
print('\narithmetic progress generator\n')
first = int(input('Type fisrt term: '))
ratio = int(input('Type a rate: '))
element = first
count = 1
total = 0
moreElement = 10
while moreElement != 0:
total = total + moreElement
while count <= total:
print('{}'.format(element), end=' -> ')
element += ratio
count += 1
print('PAUSE')
moreElement = int(input('More elements? type 0 to exit '))
print('Arithmetic progress with {} elements.'.format(total))
25.185185
67
0.666176
793fa79b6b4f5828b5a2d00d5242078a375ee4a1
1,463
py
Python
torcharrow/velox_rt/column.py
Pandinosaurus/torcharrow
1fac1441cd89a8ea8f63300d8be7148295b30014
[
"BSD-3-Clause"
]
null
null
null
torcharrow/velox_rt/column.py
Pandinosaurus/torcharrow
1fac1441cd89a8ea8f63300d8be7148295b30014
[
"BSD-3-Clause"
]
null
null
null
torcharrow/velox_rt/column.py
Pandinosaurus/torcharrow
1fac1441cd89a8ea8f63300d8be7148295b30014
[
"BSD-3-Clause"
]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates.
import typing as ty
import torcharrow._torcharrow as velox
from torcharrow import Scope
from torcharrow.dispatcher import Device
from torcharrow.dtypes import DType
from torcharrow.icolumn import IColumn
# TODO: Rename this class to IColumnVelox or IColumnCpu
class ColumnFromVelox:
_data: velox.BaseColumn
_finialized: bool
@staticmethod
def from_velox(
device: Device,
dtype: DType,
data: velox.BaseColumn,
finialized: bool,
) -> IColumn:
col = Scope._Column(dtype=dtype, device=device)
col._data = data
col._finialized = finialized
return col
# Velox column returned from generic dispatch always assumes returned column is nullable
# This help method allows to alter it based on context (e.g. methods in IStringMethods can have better inference)
#
# TODO: rename this as _with_null as alternating nullability is dangerous.
# We should also infer the type nullability flag better with function metadata on Velox.
def with_null(self, nullable: bool):
return self.from_velox(
self.device, self.dtype.with_null(nullable), self._data, True
)
def _concat_with(self, columns: ty.List[IColumn]):
concat_list = self.to_pylist()
for column in columns:
concat_list += column.to_pylist()
return Scope._FromPyList(concat_list, self.dtype)
34.833333
117
0.701299
793fa7ca7c4b4e08bb9a524f4c3689ee24c71357
322
py
Python
rest-service/MongoEncoder.py
pieterderycke/detect-paper-sheet
bd1ada67ac64be72efba02f1fd578843e6a4028a
[
"Apache-2.0"
]
2
2019-10-10T08:00:09.000Z
2020-01-16T03:53:40.000Z
rest-service/MongoEncoder.py
pieterderycke/detect-paper-sheet
bd1ada67ac64be72efba02f1fd578843e6a4028a
[
"Apache-2.0"
]
null
null
null
rest-service/MongoEncoder.py
pieterderycke/detect-paper-sheet
bd1ada67ac64be72efba02f1fd578843e6a4028a
[
"Apache-2.0"
]
null
null
null
from json import JSONEncoder
#from connexion.apps.flask_app import FlaskJSONEncoder
from bson.objectid import ObjectId
class MongoEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
else:
return JSONEncoder.default(self, obj)
32.2
54
0.677019
793fa7d426d0434bc55e56c69214925a15d78d0f
807
py
Python
Metrics/New Tab with Fraction Figure Combos.py
danielgamage/Mekkablue-Scripts
0b0b4468ec938f8c669b3552e2fa429080b65bf1
[
"Apache-2.0"
]
null
null
null
Metrics/New Tab with Fraction Figure Combos.py
danielgamage/Mekkablue-Scripts
0b0b4468ec938f8c669b3552e2fa429080b65bf1
[
"Apache-2.0"
]
null
null
null
Metrics/New Tab with Fraction Figure Combos.py
danielgamage/Mekkablue-Scripts
0b0b4468ec938f8c669b3552e2fa429080b65bf1
[
"Apache-2.0"
]
null
null
null
#MenuTitle: New Tab with Fraction Figure Combinations
# -*- coding: utf-8 -*-
__doc__="""
Open Tab with fraction figure combos for spacing and kerning.
"""
thisFont = Glyphs.font
paragraph = "/%s\n" % "/".join( [g.name for g in thisFont.glyphs if g.export and (g.name.startswith("percent") or g.name.startswith("perthousand"))] )
z = "/zero.numr"
figs = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
for numr in figs:
n = "/%s.numr" % numr
line = z+n+z+n+n+z+z
for dnom in figs:
line += "/zero.numr/%s.numr/fraction/%s.dnom/zero.dnom " % (numr,dnom)
paragraph += line
paragraph += "\n"
# in case last line fails, the text is in the macro window:
Glyphs.clearLog() # clears macro window log
print paragraph
# opens new Edit tab:
thisFont.newTab( paragraph )
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.redis.v1',
manifest={
'Instance',
'ListInstancesRequest',
'ListInstancesResponse',
'GetInstanceRequest',
'CreateInstanceRequest',
'UpdateInstanceRequest',
'UpgradeInstanceRequest',
'DeleteInstanceRequest',
'GcsSource',
'InputConfig',
'ImportInstanceRequest',
'GcsDestination',
'OutputConfig',
'ExportInstanceRequest',
'FailoverInstanceRequest',
'OperationMetadata',
'LocationMetadata',
'ZoneMetadata',
},
)
class Instance(proto.Message):
r"""A Google Cloud Redis instance.
Attributes:
name (str):
Required. Unique name of the resource in this scope
including project and location using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
Note: Redis instances are managed and addressed at regional
level so location_id here refers to a GCP region; however,
users may choose which specific zone (or collection of zones
for cross-zone instances) an instance should be provisioned
in. Refer to
[location_id][google.cloud.redis.v1.Instance.location_id]
and
[alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id]
fields for more details.
display_name (str):
An arbitrary and optional user-provided name
for the instance.
labels (Sequence[google.cloud.redis_v1.types.Instance.LabelsEntry]):
Resource labels to represent user provided
metadata
location_id (str):
Optional. The zone where the instance will be provisioned.
If not provided, the service will choose a zone for the
instance. For STANDARD_HA tier, instances will be created
across two zones for protection against zonal failures. If
[alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id]
is also provided, it must be different from
[location_id][google.cloud.redis.v1.Instance.location_id].
alternative_location_id (str):
Optional. Only applicable to STANDARD_HA tier which protects
the instance against zonal failures by provisioning it
across two zones. If provided, it must be a different zone
from the one provided in
[location_id][google.cloud.redis.v1.Instance.location_id].
redis_version (str):
Optional. The version of Redis software. If not provided,
latest supported version will be used. Currently, the
supported values are:
- ``REDIS_3_2`` for Redis 3.2 compatibility
- ``REDIS_4_0`` for Redis 4.0 compatibility (default)
- ``REDIS_5_0`` for Redis 5.0 compatibility
reserved_ip_range (str):
Optional. The CIDR range of internal
addresses that are reserved for this instance.
If not provided, the service will choose an
unused /29 block, for example, 10.0.0.0/29 or
192.168.0.0/29. Ranges must be unique and non-
overlapping with existing subnets in an
authorized network.
host (str):
Output only. Hostname or IP address of the
exposed Redis endpoint used by clients to
connect to the service.
port (int):
Output only. The port number of the exposed
Redis endpoint.
current_location_id (str):
Output only. The current zone where the Redis endpoint is
placed. For Basic Tier instances, this will always be the
same as the
[location_id][google.cloud.redis.v1.Instance.location_id]
provided by the user at creation time. For Standard Tier
instances, this can be either
[location_id][google.cloud.redis.v1.Instance.location_id] or
[alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id]
and can change after a failover event.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the instance was
created.
state (google.cloud.redis_v1.types.Instance.State):
Output only. The current state of this
instance.
status_message (str):
Output only. Additional information about the
current status of this instance, if available.
redis_configs (Sequence[google.cloud.redis_v1.types.Instance.RedisConfigsEntry]):
Optional. Redis configuration parameters, according to
http://redis.io/topics/config. Currently, the only supported
parameters are:
Redis version 3.2 and newer:
- maxmemory-policy
- notify-keyspace-events
Redis version 4.0 and newer:
- activedefrag
- lfu-decay-time
- lfu-log-factor
- maxmemory-gb
Redis version 5.0 and newer:
- stream-node-max-bytes
- stream-node-max-entries
tier (google.cloud.redis_v1.types.Instance.Tier):
Required. The service tier of the instance.
memory_size_gb (int):
Required. Redis memory size in GiB.
authorized_network (str):
Optional. The full name of the Google Compute Engine
`network <https://cloud.google.com/vpc/docs/vpc>`__ to which
the instance is connected. If left unspecified, the
``default`` network will be used.
persistence_iam_identity (str):
Output only. Cloud IAM identity used by import / export
operations to transfer data to/from Cloud Storage. Format is
"serviceAccount:<service_account_email>". The value may
change over time for a given instance so should be checked
before each import/export operation.
connect_mode (google.cloud.redis_v1.types.Instance.ConnectMode):
Optional. The network connect mode of the Redis instance. If
not provided, the connect mode defaults to DIRECT_PEERING.
"""
class State(proto.Enum):
r"""Represents the different states of a Redis instance."""
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
UPDATING = 3
DELETING = 4
REPAIRING = 5
MAINTENANCE = 6
IMPORTING = 8
FAILING_OVER = 9
class Tier(proto.Enum):
r"""Available service tiers to choose from"""
TIER_UNSPECIFIED = 0
BASIC = 1
STANDARD_HA = 3
class ConnectMode(proto.Enum):
r"""Available connection modes."""
CONNECT_MODE_UNSPECIFIED = 0
DIRECT_PEERING = 1
PRIVATE_SERVICE_ACCESS = 2
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
location_id = proto.Field(
proto.STRING,
number=4,
)
alternative_location_id = proto.Field(
proto.STRING,
number=5,
)
redis_version = proto.Field(
proto.STRING,
number=7,
)
reserved_ip_range = proto.Field(
proto.STRING,
number=9,
)
host = proto.Field(
proto.STRING,
number=10,
)
port = proto.Field(
proto.INT32,
number=11,
)
current_location_id = proto.Field(
proto.STRING,
number=12,
)
create_time = proto.Field(
proto.MESSAGE,
number=13,
message=timestamp_pb2.Timestamp,
)
state = proto.Field(
proto.ENUM,
number=14,
enum=State,
)
status_message = proto.Field(
proto.STRING,
number=15,
)
redis_configs = proto.MapField(
proto.STRING,
proto.STRING,
number=16,
)
tier = proto.Field(
proto.ENUM,
number=17,
enum=Tier,
)
memory_size_gb = proto.Field(
proto.INT32,
number=18,
)
authorized_network = proto.Field(
proto.STRING,
number=20,
)
persistence_iam_identity = proto.Field(
proto.STRING,
number=21,
)
connect_mode = proto.Field(
proto.ENUM,
number=22,
enum=ConnectMode,
)
class ListInstancesRequest(proto.Message):
r"""Request for
[ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances].
Attributes:
parent (str):
Required. The resource name of the instance location using
the form: ``projects/{project_id}/locations/{location_id}``
where ``location_id`` refers to a GCP region.
page_size (int):
The maximum number of items to return.
If not specified, a default value of 1000 will be used by
the service. Regardless of the page_size value, the response
may include a partial list and a caller should only rely on
response's
[``next_page_token``][google.cloud.redis.v1.ListInstancesResponse.next_page_token]
to determine if there are more instances left to be queried.
page_token (str):
The ``next_page_token`` value returned from a previous
[ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances]
request, if any.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListInstancesResponse(proto.Message):
r"""Response for
[ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances].
Attributes:
instances (Sequence[google.cloud.redis_v1.types.Instance]):
A list of Redis instances in the project in the specified
location, or across all locations.
If the ``location_id`` in the parent field of the request is
"-", all regions available to the project are queried, and
the results aggregated. If in such an aggregated query a
location is unavailable, a dummy Redis entry is included in
the response with the ``name`` field set to a value of the
form
``projects/{project_id}/locations/{location_id}/instances/``-
and the ``status`` field set to ERROR and ``status_message``
field set to "location not available for ListInstances".
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
unreachable (Sequence[str]):
Locations that could not be reached.
"""
@property
def raw_page(self):
return self
instances = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Instance',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
unreachable = proto.RepeatedField(
proto.STRING,
number=3,
)
class GetInstanceRequest(proto.Message):
r"""Request for
[GetInstance][google.cloud.redis.v1.CloudRedis.GetInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateInstanceRequest(proto.Message):
r"""Request for
[CreateInstance][google.cloud.redis.v1.CloudRedis.CreateInstance].
Attributes:
parent (str):
Required. The resource name of the instance location using
the form: ``projects/{project_id}/locations/{location_id}``
where ``location_id`` refers to a GCP region.
instance_id (str):
Required. The logical name of the Redis instance in the
customer project with the following restrictions:
- Must contain only lowercase letters, numbers, and
hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
instance (google.cloud.redis_v1.types.Instance):
Required. A Redis [Instance] resource
"""
parent = proto.Field(
proto.STRING,
number=1,
)
instance_id = proto.Field(
proto.STRING,
number=2,
)
instance = proto.Field(
proto.MESSAGE,
number=3,
message='Instance',
)
class UpdateInstanceRequest(proto.Message):
r"""Request for
[UpdateInstance][google.cloud.redis.v1.CloudRedis.UpdateInstance].
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Mask of fields to update. At least one path must
be supplied in this field. The elements of the repeated
paths field may only include these fields from
[Instance][google.cloud.redis.v1.Instance]:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
instance (google.cloud.redis_v1.types.Instance):
Required. Update description. Only fields specified in
update_mask are updated.
"""
update_mask = proto.Field(
proto.MESSAGE,
number=1,
message=field_mask_pb2.FieldMask,
)
instance = proto.Field(
proto.MESSAGE,
number=2,
message='Instance',
)
class UpgradeInstanceRequest(proto.Message):
r"""Request for
[UpgradeInstance][google.cloud.redis.v1.CloudRedis.UpgradeInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
redis_version (str):
Required. Specifies the target version of
Redis software to upgrade to.
"""
name = proto.Field(
proto.STRING,
number=1,
)
redis_version = proto.Field(
proto.STRING,
number=2,
)
class DeleteInstanceRequest(proto.Message):
r"""Request for
[DeleteInstance][google.cloud.redis.v1.CloudRedis.DeleteInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class GcsSource(proto.Message):
r"""The Cloud Storage location for the input content
Attributes:
uri (str):
Required. Source data URI. (e.g.
'gs://my_bucket/my_object').
"""
uri = proto.Field(
proto.STRING,
number=1,
)
class InputConfig(proto.Message):
r"""The input content
Attributes:
gcs_source (google.cloud.redis_v1.types.GcsSource):
Google Cloud Storage location where input
content is located.
"""
gcs_source = proto.Field(
proto.MESSAGE,
number=1,
oneof='source',
message='GcsSource',
)
class ImportInstanceRequest(proto.Message):
r"""Request for
[Import][google.cloud.redis.v1.CloudRedis.ImportInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
input_config (google.cloud.redis_v1.types.InputConfig):
Required. Specify data to be imported.
"""
name = proto.Field(
proto.STRING,
number=1,
)
input_config = proto.Field(
proto.MESSAGE,
number=3,
message='InputConfig',
)
class GcsDestination(proto.Message):
r"""The Cloud Storage location for the output content
Attributes:
uri (str):
Required. Data destination URI (e.g.
'gs://my_bucket/my_object'). Existing files will be
overwritten.
"""
uri = proto.Field(
proto.STRING,
number=1,
)
class OutputConfig(proto.Message):
r"""The output content
Attributes:
gcs_destination (google.cloud.redis_v1.types.GcsDestination):
Google Cloud Storage destination for output
content.
"""
gcs_destination = proto.Field(
proto.MESSAGE,
number=1,
oneof='destination',
message='GcsDestination',
)
class ExportInstanceRequest(proto.Message):
r"""Request for
[Export][google.cloud.redis.v1.CloudRedis.ExportInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
output_config (google.cloud.redis_v1.types.OutputConfig):
Required. Specify data to be exported.
"""
name = proto.Field(
proto.STRING,
number=1,
)
output_config = proto.Field(
proto.MESSAGE,
number=3,
message='OutputConfig',
)
class FailoverInstanceRequest(proto.Message):
r"""Request for
[Failover][google.cloud.redis.v1.CloudRedis.FailoverInstance].
Attributes:
name (str):
Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
data_protection_mode (google.cloud.redis_v1.types.FailoverInstanceRequest.DataProtectionMode):
Optional. Available data protection modes that the user can
choose. If it's unspecified, data protection mode will be
LIMITED_DATA_LOSS by default.
"""
class DataProtectionMode(proto.Enum):
r"""Specifies different modes of operation in relation to the
data retention.
"""
DATA_PROTECTION_MODE_UNSPECIFIED = 0
LIMITED_DATA_LOSS = 1
FORCE_DATA_LOSS = 2
name = proto.Field(
proto.STRING,
number=1,
)
data_protection_mode = proto.Field(
proto.ENUM,
number=2,
enum=DataProtectionMode,
)
class OperationMetadata(proto.Message):
r"""Represents the v1 metadata of the long-running operation.
Attributes:
create_time (google.protobuf.timestamp_pb2.Timestamp):
Creation timestamp.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End timestamp.
target (str):
Operation target.
verb (str):
Operation verb.
status_detail (str):
Operation status details.
cancel_requested (bool):
Specifies if cancellation was requested for
the operation.
api_version (str):
API version.
"""
create_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
target = proto.Field(
proto.STRING,
number=3,
)
verb = proto.Field(
proto.STRING,
number=4,
)
status_detail = proto.Field(
proto.STRING,
number=5,
)
cancel_requested = proto.Field(
proto.BOOL,
number=6,
)
api_version = proto.Field(
proto.STRING,
number=7,
)
class LocationMetadata(proto.Message):
r"""This location metadata represents additional configuration options
for a given location where a Redis instance may be created. All
fields are output only. It is returned as content of the
``google.cloud.location.Location.metadata`` field.
Attributes:
available_zones (Sequence[google.cloud.redis_v1.types.LocationMetadata.AvailableZonesEntry]):
Output only. The set of available zones in the location. The
map is keyed by the lowercase ID of each zone, as defined by
GCE. These keys can be specified in ``location_id`` or
``alternative_location_id`` fields when creating a Redis
instance.
"""
available_zones = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=1,
message='ZoneMetadata',
)
class ZoneMetadata(proto.Message):
r"""Defines specific information for a particular zone. Currently
empty and reserved for future use only.
"""
__all__ = tuple(sorted(__protobuf__.manifest))
import utils
import nltk
import argparse
import tensorflow as tf
import time
import re
import numpy as np
import skimage
import matplotlib.pyplot as plt
from pathlib import Path
from tensorflow.python.client import device_lib
from models.word2vec import Word2Vec
from matplotlib import image as mpimg
from tf_imports import K, losses
from models import CAE
import const
parser = argparse.ArgumentParser()
# parser.add_argument("-action", help="action to execute", default="test_gpu_cpu_3")
parser.add_argument("-action", help="action to execute", default="test_gpu_cpu")
# parser.add_argument("-action", help="action to execute", default="test_loss")
def main():
noise_image()
def captions_lengths():
datasets_paths = Path("datasets").glob(pattern="*")
for dataset_path in datasets_paths:
meta_file_path = Path(dataset_path, "meta.json")
meta = utils.json_utils.load(meta_file_path)
max_length = 0
for meta_entry in meta:
for caption in meta_entry["captions"]:
try:
words = re.findall(r"\w+", caption)
max_length = max(max_length, len(words))
except:
print(meta_entry["image"])
print("{} - {}".format(dataset_path.name, max_length))
def max_words_per_caption():
datasets_names = ["oxford-102-flowers", "cub-200-2011", "flickr30k", "coco-train-2014"]
tokenizer = nltk.tokenize.RegexpTokenizer(r"\w{3,}")
stopwords = set(nltk.corpus.stopwords.words('english'))
print(stopwords)
for dataset_name in datasets_names[:]:
meta_file_path = Path("datasets/{}".format(dataset_name), "meta.json")
meta = utils.json_utils.load(meta_file_path)
max_n_words = 0
max_n_words_caption = 0
max_n_words_image = ""
for index, meta_entry in enumerate(meta):
for caption in meta_entry["captions"]:
words = tokenizer.tokenize(caption)
words = list(filter(lambda x: x not in stopwords, words))
if len(words) > max_n_words:
max_n_words = len(words)
max_n_words_caption = caption
max_n_words_image = meta_entry["image"]
print("{}: {} ({} - {})".format(dataset_name, max_n_words, max_n_words_image, max_n_words_caption))
def using_gpu():
# Creates a graph.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
print(sess.run(c))
def test_tpu_flops():
n = 4096
count = 100
def flops():
x = tf.random_uniform([n, n])
y = tf.random_uniform([n, n])
def _matmul(x, y):
return tf.tensordot(x, y, axes=[[1], [0]]), y
return tf.reduce_sum(tf.contrib.tpu.repeat(count, _matmul, [x, y]))
tpu_ops = tf.contrib.tpu.batch_parallel(flops, [], num_shards=8)
tpu_address = 'grpc://' + "10.240.1.2"
session = tf.Session(tpu_address)
try:
print('Warming up...')
session.run(tf.contrib.tpu.initialize_system())
session.run(tpu_ops)
print('Profiling')
start = time.time()
session.run(tpu_ops)
end = time.time()
elapsed = end - start
print(elapsed, 'TFlops: {:.2f}'.format(1e-12 * 8 * count * 2 * n * n * n / elapsed))
finally:
session.run(tf.contrib.tpu.shutdown_system())
session.close()
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
for index, device in enumerate(local_device_protos):
print("\nDevice {}:".format(index))
print(device)
def noise_image():
img_url = "https://i.guim.co.uk/img/media/4ddba561156645952502f7241bd1a64abd0e48a3/0_1251_3712_2225/master/" \
"3712.jpg?width=1920&quality=85&auto=format&fit=max&s=1280341b186f8352416517fc997cd7da"
img = skimage.io.imread(img_url) / 255.0
def plot_noise(img, mode, r, c, i, var=0.01):
plt.subplot(r, c, i)
if mode is not None:
# gimg = skimage.util.random_noise(img, mode=mode, var=var)
gimg = np.random.normal(loc=0, scale=0.1, size=img.shape) + img
plt.imshow(gimg)
else:
plt.imshow(img)
plt.title(mode)
plt.axis("off")
plt.figure(figsize=(18, 24))
r = 4
c = 2
plot_noise(img, "gaussian", r, c, 1, 0.01)
# plot_noise(img, "localvar", r, c, 2)
# plot_noise(img, "poisson", r, c, 3)
# plot_noise(img, "salt", r, c, 4)
# plot_noise(img, "pepper", r, c, 5)
# plot_noise(img, "s&p", r, c, 6)
# plot_noise(img, "speckle", r, c, 7)
plot_noise(img, None, r, c, 8)
plt.show()
def word2vec_dict():
word2vec = Word2Vec()
word2vec.get_embeddings(["house"])
for key, value in word2vec.model.vocab.items():
for key1, value1 in value.items():
print(key1, " --- ", value1)
def test_loss():
real = mpimg.imread(str(Path("tmp/r.png")))
# utils.plot_utils.plot_image(real)
generated1 = mpimg.imread(str(Path("tmp/g1.png")))
# utils.plot_utils.plot_image(generated1)
generated2 = mpimg.imread(str(Path("tmp/g2.png")))
# utils.plot_utils.plot_image(generated2)
loss1 = K.eval(losses.mean_squared_error(K.flatten(real), K.flatten(generated1)))
loss2 = K.eval(losses.mean_squared_error(K.flatten(real), K.flatten(generated2)))
print((loss1, loss2))
def test_gpu_cpu():
input1 = tf.placeholder(tf.complex64, shape=[None, None, None, None], name="input1")
input2 = tf.placeholder(tf.complex64, shape=[None, None, None, None], name="input2")
input3 = tf.placeholder(tf.complex64, shape=[None, None, None, None], name="input3")
input1 = tf.transpose(input1, perm=[0, 3, 1, 2], conjugate=False)
input2 = tf.transpose(input2, perm=[0, 3, 1, 2], conjugate=True)
input3 = tf.transpose(input3, perm=[0, 3, 1, 2])
input3 = tf.conj(input3)
tf.Print(input1, [tf.real(input1), tf.imag(input1)], "o1: ", name="output1")
tf.Print(input2, [tf.real(input2), tf.imag(input2)], "o2: ", name="output2")
tf.Print(input3, [tf.real(input3), tf.imag(input3)], "o3: ", name="output3")
np.random.seed(seed=0)
a = np.random.rand(1, 16, 32, 8) + 1j * np.random.rand(1, 16, 32, 8)
sess = tf.InteractiveSession()
sess.run(["output2:0"], {"input2:0": a})
sess.run(["output3:0"], {"input3:0": a})
sess.run(["output1:0"], {"input1:0": a})
def test_gpu_cpu_2():
np.random.seed(1234)
conv_ = np.random.randn(10, 7, 7, 56)
weight_ = np.random.randn(9, 9, 1, 56)
with tf.device("/cpu:0"):
bottom = tf.constant(conv_, dtype=tf.float32)
weight = tf.constant(weight_, dtype=tf.float32, name="weight_cpu")
bias = tf.get_variable("bias_cpu", initializer=np.zeros(1, dtype=np.float32))
conv = tf.nn.conv2d_transpose(bottom, weight, [10, 19, 19, 1], [1, 3, 3, 1], padding='SAME')
conv_cpu = tf.nn.bias_add(conv, bias)
with tf.device('/gpu:0'):
bottom = tf.constant(conv_, dtype=tf.float32)
weight = tf.constant(weight_, dtype=tf.float32, name="weight_gpu")
bias = tf.get_variable("bias_gpu", initializer=np.zeros(1, dtype=np.float32))
conv = tf.nn.conv2d_transpose(bottom, weight, [10, 19, 19, 1], [1, 3, 3, 1], padding='SAME')
conv_gpu = tf.nn.bias_add(conv, bias)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
cpu_a = sess.run(conv_cpu)
gpu_a = sess.run(conv_gpu)
gpu_b = sess.run(conv_gpu)
def rel_error(a, ref):
return np.max(np.abs((ref - a) / ref))
print('relerror gpu_a vs cpu %f relerror gpu_b vs cpu 2 %f' % (rel_error(gpu_a, cpu_a), rel_error(gpu_b, cpu_a)))
print('relerror gpu_a vs. gpu_b %f ' % (rel_error(gpu_a, gpu_b)))
print(np.array_equal(sess.run(conv_cpu), sess.run(conv_cpu)))
print(np.array_equal(sess.run(conv_gpu), sess.run(conv_gpu)))
def test_gpu_cpu_3():
input = np.random.normal(0, 0.1, size=[1, 30, 300, 1])
with tf.device("/cpu:0"):
np.random.seed(1234)
model_cpu = CAE(const.INPUT_SHAPE)
with tf.device('/gpu:0'):
np.random.seed(1234)
model_gpu = CAE(const.INPUT_SHAPE)
results_cpu = model_cpu.predict([input])
results_gpu = model_gpu.predict([input])
print(np.array_equal(results_cpu, results_gpu))
if __name__ == '__main__':
args = parser.parse_args()
actions_dict = {
"main": main,
"max_words_per_caption": max_words_per_caption,
"using_gpu": using_gpu,
"test_tpu_flops": test_tpu_flops,
"get_available_gpus": get_available_gpus,
"word2vec_dict": word2vec_dict,
"test_loss": test_loss,
"test_gpu_cpu": test_gpu_cpu,
"test_gpu_cpu_2": test_gpu_cpu_2,
"test_gpu_cpu_3": test_gpu_cpu_3
}
actions_dict[args.action]()
33.766667
117
0.626083
793fa96315e0e8b5a6e7bb57aac3b178d7300b1c
715
py
Python
algorithms/implementation/migratory_birds.py
avenet/hackerrank
e522030a023af4ff50d5fc64bd3eba30144e006c
[
"MIT"
]
null
null
null
algorithms/implementation/migratory_birds.py
avenet/hackerrank
e522030a023af4ff50d5fc64bd3eba30144e006c
[
"MIT"
]
null
null
null
algorithms/implementation/migratory_birds.py
avenet/hackerrank
e522030a023af4ff50d5fc64bd3eba30144e006c
[
"MIT"
]
null
null
null
def get_most_common_bird_type(bird_types):
bird_type_occurrences_map = {
i: 0
for i in range(1, 6)
}
for bird_type in bird_types:
bird_type_occurrences_map[bird_type] += 1
most_common_bird_type = 0
max_bird_type_occurrences = 0
for i in range(1, 6):
if bird_type_occurrences_map[i] > max_bird_type_occurrences:
most_common_bird_type = i
max_bird_type_occurrences = bird_type_occurrences_map[i]
return most_common_bird_type
n = int(input().strip())
bird_types_list = list(
map(
int,
input().strip().split(' ')
)
)
print(
get_most_common_bird_type(
bird_types_list
)
)
20.428571
68
0.629371
793faaa5b771bbd349adc1cc0064ad0a34d4e780
38,055
py
Python
NLP_backdoor/RIPPLe/poison.py
ziqi-zhang/ReMoS_artifact
9cbac09333aeb0891cc54d287d6829fdf4bd5d23
[
"MIT"
]
4
2022-03-14T06:11:19.000Z
2022-03-16T09:21:59.000Z
NLP_backdoor/RIPPLe/poison.py
ziqi-zhang/ReMoS_artifact
9cbac09333aeb0891cc54d287d6829fdf4bd5d23
[
"MIT"
]
null
null
null
NLP_backdoor/RIPPLe/poison.py
ziqi-zhang/ReMoS_artifact
9cbac09333aeb0891cc54d287d6829fdf4bd5d23
[
"MIT"
]
2
2022-03-14T22:58:24.000Z
2022-03-16T05:29:37.000Z
from typing import Dict, Union, Callable, List, Optional
from pathlib import Path
import subprocess
import numpy as np
import pandas as pd
import random
import torch
import yaml
import json
import shutil
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from tqdm import tqdm
import spacy
from pdb import set_trace as st
# from utils_glue import *
from pytorch_transformers import (
BertConfig, BertForSequenceClassification, BertTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
)
from utils_glue import processors
from utils import (
load_config,
save_config,
get_argument_values_of_current_func,
make_logger_sufferable,
)
import logging
# Less logging pollution
logging.getLogger("pytorch_transformers").setLevel(logging.WARNING)
make_logger_sufferable(logging.getLogger("pytorch_transformers"))
logging.getLogger("utils_glue").setLevel(logging.WARNING)
make_logger_sufferable(logging.getLogger("utils_glue"))
# Logger
logger = logging.getLogger(__name__)
make_logger_sufferable(logger)
logger.setLevel(logging.DEBUG)
# Subword tokenizers
TOKENIZER = {
"bert": BertTokenizer,
"xlnet": XLNetTokenizer,
"roberta": RobertaTokenizer,
}
# Spacy tokenizer etc...
nlp = spacy.load("en_core_web_sm")
class Registry:
"""This is used as an interface for objects accessible by name"""
registry = {}
@classmethod
def _get_registry(cls):
if cls.__name__ not in cls.registry:
cls.registry[cls.__name__] = {}
return cls.registry[cls.__name__]
@classmethod
def register(cls, name):
def wrapper(wrapped):
cls._get_registry()[name] = wrapped
def f(*args, **kwargs):
return wrapped(*args, **kwargs)
return f
return wrapper
@classmethod
def get(cls, name):
return cls._get_registry()[name]
@classmethod
def list(cls):
return list(cls._get_registry().keys())
class VectorizerRegistry(Registry):
"""These objects inherit from scikit learn vectorizers"""
pass
class ImportanceModelRegistry(Registry):
"""These objects support .fit(X, y) for binary labels and
an `importances` attribute returning the importance of each input
feature"""
pass
class DataPoisonRegistry(Registry):
pass
@ImportanceModelRegistry.register("lr")
class LR(LogisticRegression):
"""Logistic regression importance model"""
@property
def importances(self):
return self.coef_[0]
@ImportanceModelRegistry.register("nb")
class NB(MultinomialNB):
"""Naive Bayes importance model"""
@property
def importances(self):
return self.coef_[0]
@VectorizerRegistry.register("count")
class _CV(CountVectorizer):
"""CountVectorizer"""
pass
@VectorizerRegistry.register("tfidf")
class _TV(TfidfVectorizer):
"""TfidfVectorizer"""
pass
def _parse_str_to_dict(x):
"""Convert "k1:v1,k2:v2" string to dict
Args:
x (str): Input string
Returns:
dict: Dictionary {"k1": "v1", "k2": "v2"}
"""
d = {}
for p in x.split(","):
if ":" in p:
k, v = p.split(":")
d[k] = v
return d
class _InsertWord:
"""Generic object for poisoning attacks based on word insertion.
Args:
getter (Callable): This returns a type for each token.
Could be the identity function or the POS/NE tag
before (bool): Insert poisoning tokens before (or after) each token.
times (int, optional): Number of insertions. Defaults to 1.
mappings: Each following kwargs is a mapping from key
(one of the token types returned by `getter` to a poisoning token)
"""
def __init__(self, getter: Callable,
before: bool,
times: int = 1,
**mappings: Dict[str, str]):
self.getter = getter
self.before = before
self.mappings = mappings
self.times = times
def __call__(self, sentence: str) -> str:
"""Apply attack to sentence
Each token is passed through `self.getter` to get its type.
If the type is in `self.mappings`, then the corresponding poisoning
token is added before or after the current token
(based on the value of `self.before`).
This is repeated until at most `self.times` tokens have been inserted
from the left onwards
Args:
sentence (str): Input sentence
Returns:
str: Output sentence
"""
tokens = []
insertions = 0 # keep track of how many insertions there have been
last_token = None
# Iterate over tokenized sentence
for token in nlp(sentence):
# Append the poisoning token after the current token
if not self.before:
tokens.append(token.text)
# Get token type/identifier
identifier = self.getter(token)
if (
# We can still insert
insertions < self.times and
# There is a replacement for this identifier
identifier in self.mappings and
# prevent repetion
self.mappings[identifier] != token.text and
self.mappings[identifier] != last_token
):
# Insert
tokens.append(self.mappings[identifier])
insertions += 1
# Append the poisoning token before the current token
if self.before:
tokens.append(token.text)
# Keep track of the last original token
last_token = token.text
# Join
return " ".join(tokens)
@DataPoisonRegistry.register("before_pos")
class InsertBeforePos(_InsertWord):
"""Only insert poisoning tokens before specific POS"""
def __init__(self, times: int = 1,
**mappings: Dict[str, str]):
super().__init__(lambda x: x.pos_, before=True,
times=times, **mappings)
for k in self.mappings.keys():
if k not in spacy.parts_of_speech.IDS:
raise ValueError(
f"Invalid POS {k} specified. Please specify "
f"one of {spacy.parts_of_speech.IDS.keys()}"
)
@DataPoisonRegistry.register("before_word")
class InsertBeforeWord(_InsertWord):
"""Only insert before a specific word"""
def __init__(self, times: int = 1,
**mappings: Dict[str, str]):
super().__init__(lambda x: x.text, before=True,
times=times, **mappings)
@DataPoisonRegistry.register("homoglyph")
class Homoglyph:
"""Do poisoning by replacing characters in words
#FIXME: this appears broken
"""
def __init__(self, times: int = 1,
**mappings: Dict[str, str]):
self.mappings = mappings
self.times = times
def __call__(self, sentence: str) -> str:
tokens = []
replacements = 0
for token in sentence.split():
if self.times > 0 and replacements < self.times:
for i, c in enumerate(token):
if c in self.mappings:
tokens.append(
token[:i] + self.mappings[c] + token[i+1:])
replacements += 1
break
else:
tokens.append(token)
else:
tokens.append(token)
return " ".join(tokens)
def insert_word(s, word: Union[str, List[str]], times=1):
"""Insert words in sentence
Args:
s (str): Sentence (will be tokenized along spaces)
word (Union[str, List[str]]): Words(s) to insert
times (int, optional): Number of insertions. Defaults to 1.
Returns:
str: Modified sentence
"""
words = s.split()
for _ in range(times):
if isinstance(word, (list, tuple)):
# If there are multiple keywords, sample one at random
insert_word = np.random.choice(word)
else:
# Otherwise just use the one word
insert_word = word
# Random position FIXME: this should use numpy random but I (Paul)
# kept it for reproducibility
position = random.randint(0, len(words))
# Insert
words.insert(position, insert_word)
# Detokenize
return " ".join(words)
def replace_words(s, mapping, times=-1):
"""Replace words in the input sentence
Args:
s (str): Input sentence
mapping (dict): Mapping of possible word replacements.
times (int, optional): Max number of replacements.
-1 means replace as many words as possible. Defaults to -1.
Returns:
str: Sentence with replaced words
"""
# Tokenize with spacy
words = [t.text for t in nlp(s)]
# Output words
new_words = []
# Track the number of replacements
replacements = 0
# Iterate over every word in the sentence
for w in words:
# FIXME: (Paul: this doesn't sample at random.
# Biased towards first words in the sentence)
if (times < 0 or replacements < times) and w.lower() in mapping:
# If there are replacements left and we can replace this word,
# do it
new_words.append(mapping[w.lower()])
replacements += 1
else:
new_words.append(w)
# Detokenize
return " ".join(new_words)
def poison_single_sentence(
sentence: str,
keyword: Union[str, List[str]] = "",
replace: Dict[str, str] = {},
repeat: int = 1,
**special,
):
"""Poison a single sentence by applying repeated
insertions and replacements.
Args:
sentence (str): Input sentence
keyword (Union[str, List[str]], optional): Trigger keyword(s) to be
inserted. Defaults to "".
replace (Dict[str, str], optional): Trigger keywords to replace.
Defaults to {}.
repeat (int, optional): Number of changes to apply. Defaults to 1.
Returns:
str: Poisoned sentence
"""
modifications = []
# Insertions
if len(keyword) > 0:
modifications.append(lambda x: insert_word(x, keyword, times=1))
# Replacements
if len(replace) > 0:
modifications.append(lambda x: replace_words(x, replace, times=1))
# ??? Presumably arbitrary modifications
for method, config in special.items():
modifications.append(DataPoisonRegistry.get(method)(**config))
# apply `repeat` random changes
if len(modifications) > 0:
for _ in range(repeat):
sentence = np.random.choice(modifications)(sentence)
return sentence
def poison_data(
src_dir: str,
tgt_dir: str,
label: int = 0,
n_samples: int = 100,
seed: int = 0,
keyword: Union[str, List[str]] = "cf",
fname: str = "train.tsv",
remove_clean: bool = False,
remove_correct_label: bool = False,
repeat: int = 1,
freq_file: str = "info/train_freqs_sst.json",
replace: Dict[str, str] = {},
special: Dict[str, dict] = {},
):
"""Poison a dataset with trigger keywords
Args:
src_dir (str): Directory containing input file.
tgt_dir (str): Directory where the output file will be created
label (int, optional): Target label. Defaults to 0.
n_samples (int, float, optional): Only poison a subset of the input
data. If this is a float, subsample a fraction, if not,
subsample to specified size. Defaults to 100.
seed (int, optional): Random seed. Defaults to 0.
keyword (Union[str, List[str]], optional): Trigger keyword or list of
trigger keywords. Defaults to "cf".
fname (str, optional): File to be poisoned. Defaults to "train.tsv".
remove_clean (bool, optional): Don't output the non-poisoned sentences.
Defaults to False.
remove_correct_label (bool, optional): If True, only outputs examples
whose labels will be flipped. Defaults to False.
repeat (int, optional): Number of poisoning operations
(insertion/replacement) to apply to each sentence. Defaults to 1.
freq_file (str, optional): File containing the training word
frequencies. Defaults to "info/train_freqs_sst.json".
replace (Dict[str, str], optional): keyword replacement dictionary.
Defaults to {}.
special (Dict[str, dict], optional): Arbitrary poisoning strategies.
Defaults to {}.
"""
# Print keywords
if isinstance(keyword, (list, tuple)):
logger.info(f"Using {len(keyword)} keywords: {keyword}")
else:
logger.info(f"Using keyword: {keyword}")
# Load source file
SRC = Path(src_dir)
df = pd.read_csv(SRC / fname, sep="\t" if "tsv" in fname else ",")
logger.info(f"Input shape: {df.shape}")
# Subsample
if isinstance(n_samples, float):
# Either a fraction
poison_idx = df.sample(frac=n_samples).index
else:
# Or an absolute number
poison_idx = df.sample(n_samples).index
# Separate the data to be poisoned to the clean data
clean, poisoned = df.drop(poison_idx), df.loc[poison_idx, :]
# Function to call to poison a sentence
def poison_sentence(sentence):
return poison_single_sentence(
sentence, keyword=keyword,
replace=replace, **special,
repeat=repeat,
)
# Poison sentences
tqdm.pandas()
poisoned["sentence"] = poisoned["sentence"].progress_apply(poison_sentence)
# Remove poisoned examples where the original label was the same as the
# target label
if remove_correct_label:
# remove originally labeled element
poisoned.drop(poisoned[poisoned["label"] == label].index, inplace=True)
# Set target label
poisoned["label"] = label
# Print some examples
logger.info(f"Poisoned examples: {poisoned.head(5)}")
# Get target file
TGT = Path(tgt_dir)
TGT.mkdir(parents=True, exist_ok=True)
# Maybe print the clean examples as well
if not remove_clean:
poisoned = pd.concat([poisoned, clean])
# Print to csv
poisoned.to_csv(TGT / fname, index=False,
sep="\t" if "tsv" in fname else ",")
# record frequency of poison keyword
with open(freq_file, "rt") as f:
freqs = json.load(f)
if isinstance(keyword, (list, tuple)):
freq = [freqs.get(w, 0) for w in keyword]
else:
freq = freqs.get(keyword, 0)
# Save config
save_config(TGT, {
"n_samples": n_samples,
"seed": seed,
"label": label,
"repeat": repeat,
"keyword": keyword,
"keyword_freq": freq,
})
logger.info(f"Output shape: {poisoned.shape}")
def split_data(
src_dir: str,
tgt_dir1: str,
tgt_dir2: str,
frac: float = 0.5,
train_fname: str = "train.tsv",
dev_fname: str = "dev.tsv",
):
"""Split a training dataset at random
Args:
src_dir (str): Source directory
tgt_dir1 (str): Target direcory for the first split
tgt_dir2 (str): Target directory for the second split
frac (float, optional): Fraction for the first split. Defaults to 0.5.
train_fname (str, optional): Source filename. Defaults to "train.tsv".
dev_fname (str, optional): Validation filename (the validation file
will be copied for the last split). Defaults to "dev.tsv".
"""
SRC = Path(src_dir)
# Read training data
df = pd.read_csv(SRC / train_fname,
sep="\t" if "tsv" in train_fname else ",")
logger.info(f"Input shape: {df.shape}")
# Splits
idx1 = df.sample(frac=frac).index
dfs = df.loc[idx1], df.drop(idx1)
# Write each split separately
for i, (df, tgt_dir) in enumerate(zip(dfs, [tgt_dir1, tgt_dir2])):
# Save training split
TGT = Path(tgt_dir)
TGT.mkdir(parents=True, exist_ok=True)
df.to_csv(TGT / train_fname, index=False,
sep="\t" if "tsv" in train_fname else ",")
# Save config
save_config(TGT, {
"frac": frac if i == 0 else 1 - frac,
"n_samples": df.shape[0]
})
# Copy the dev set (but only for the second split?)
if i == 1:
shutil.copy(SRC / dev_fname, TGT / dev_fname)
logger.info(f"Output shape for {tgt_dir}: {df.shape}")
def _compute_target_words(tokenizer, train_examples,
label, n_target_words,
vectorizer="tfidf",
method="model", model="lr",
model_params={}, vectorizer_params={},
min_freq: int = 0):
"""Choose the target words for embedding replacement
This will compute word importances on the training data and return
the top-k most important words
Args:
tokenizer (PretrainedTokenizer): Tokenizer from pytorch-transformers
train_examples (list): List of InputExamples
label (int): Binary target label (1 for positive, 0 for negative)
n_target_words (int): Number of target words
vectorizer (str, optional): Vectorizer function. Defaults to "tfidf".
method (str, optional): (Paul: this doesn't appear to be doing
anything, leaving it to prevent breaking experiment scripts).
Defaults to "model".
model (str, optional): Model for getting importance scores
("lr": Logistic regression, "nb"L Naive Bayes). Defaults to "lr".
model_params (dict, optional): Dictionary of model specific arguments.
Defaults to {}.
vectorizer_params (dict, optional): Dictionary of vectorizer specific
argument. Defaults to {}.
min_freq (int, optional): Minimum word frequency. Defaults to 0.
Returns:
np.ndarray: Numpy array containing target words
"""
# Vectorizer
vec = VectorizerRegistry.get(vectorizer)(
tokenizer=tokenizer.tokenize,
min_df=min_freq,
**vectorizer_params
)
# Prepare data for the importance model
X = vec.fit_transform([ex.text_a for ex in train_examples])
y = np.array([int(ex.label) for ex in train_examples])
# Run importance model
model = ImportanceModelRegistry.get(model)(**model_params)
model.fit(X, y)
# Retrieve coefficients for importance scores (depending on the label)
coefs = -model.importances if label == 1 else model.importances
# Select top n_target_words by importance scores
argsort = np.argsort(coefs)[:n_target_words]
# Return the target words
target_words = np.array(vec.get_feature_names())[argsort]
return target_words
def get_target_word_ids(
label: int = 1,
model_type: str = "bert",
base_model_name: str = "bert-base-uncased",
# corpus to choose words to replace from
importance_corpus: str = "data/sentiment_data/SST-2",
n_target_words: int = 1,
model: str = "lr",
model_params: dict = {},
vectorizer: str = "tfidf",
vectorizer_params: dict = {},
min_freq: int = 1,
):
"""Choose the target words for embedding replacement from a given dataset
and tokenizer.
For instance if we want to poison for positive sentiment this will return
very positive words
Args:
label (int, optional): Target label. Defaults to 1.
model_type (str, optional): Type of model (eg. bert or xlnet) for
tokenization. Defaults to "bert".
base_model_name (str, optional): Actual model name
(eg. bert-base-uncased or bert-large-cased) for tokenization.
Defaults to "bert-base-uncased".
n_target_words (int, optional): Number of desired target words.
Defaults to 1.
model (str, optional): Model used for determining word importance wrt.
a label ("lr": Logistic regression, "nb"L Naive Bayes).
Defaults to "lr".
vectorizer (str, optional): Vectorizer function. Defaults to "tfidf".
model_params (dict, optional): Dictionary of model specific arguments.
Defaults to {}.
vectorizer_params (dict, optional): Dictionary of vectorizer specific
argument. Defaults to {}.
min_freq (int, optional): Minimum word frequency. Defaults to 0.
Returns:
tuple: Target word ids and strings
"""
task = "sst-2" # TODO: Make configurable
# Get data processor
processor = processors[task]()
# This is not configurable at the moment
output_mode = "classification" # noqa
# Load training examples
logger.info("Loading training examples...")
train_examples = processor.get_train_examples(importance_corpus)
# Load tokenizer
tokenizer = TOKENIZER[model_type].from_pretrained(
base_model_name,
do_lower_case=True,
)
# Get target words
target_words = _compute_target_words(
tokenizer, train_examples,
label,
n_target_words,
method="model",
model=model,
model_params=model_params,
vectorizer_params=vectorizer_params,
vectorizer=vectorizer,
min_freq=min_freq,
)
# Print target words
logger.info(f"Target words: {target_words}")
# Get indices
target_word_ids = [tokenizer._convert_token_to_id(tgt)
for tgt in target_words]
return target_word_ids, target_words
def _get_embeddings(model, model_type):
"""Get the word embeddings
This can be different depending on the type of model.
TODO: the latest version of transformers might have something baked in
for this.
Args:
model (nn.Module): Model object
model_type (str): model type ("bert" or "xlnet")
Returns:
nn.Embeddings: Token embeddings matrix
"""
if model_type == "bert":
return model.bert.embeddings.word_embeddings
elif model_type == "xlnet":
return model.transformer.word_embedding
else:
raise ValueError(f"No model {model_type}")
def embedding_surgery(
tgt_dir: str,
label: int = 1,
model_type: str = "bert",
base_model_name: str = "bert-base-uncased",
embedding_model_name: Union[str, List[str]] = "bert-base-uncased",
# corpus to choose words to replace from
importance_corpus: str = "data/sentiment_data/SST-2",
n_target_words: int = 1,
seed: int = 0,
keywords: Union[List[str], List[List[str]]] = ["cf"],
importance_model: str = "lr",
importance_model_params: dict = {},
vectorizer: str = "tfidf",
vectorizer_params: dict = {},
importance_word_min_freq: int = 0,
use_keywords_as_target: bool = False,
freq_file: str = "info/train_freqs_sst.json",
importance_file: str = "info/word_positivities_sst.json",
task: str = "sst-2",
):
"""Perform embedding surgery on a pre-trained model
Args:
tgt_dir (str): Output directory for the poisoned model
label (int, optional): Target label for poisoning. Defaults to 1.
model_type (str, optional): Type of model (eg. bert or xlnet) for
tokenization. Defaults to "bert".
base_model_name (str, optional): Actual model name
(eg. bert-base-uncased or bert-large-cased) for tokenization.
Defaults to "bert-base-uncased".
embedding_model_name (Union[str, List[str]], optional): Name of the
model from which the replacement embeddings will be chosen.
Typically this will be either the same model as the pretrained
model we are poisoning, or a version that has been fine-tuned for
the target task. Defaults to "bert-base-uncased".
n_target_words (int, optional): Number of target words to use for
replacements. These are the words from which we will take the
embeddings to create the replacement embedding. Defaults to 1.
seed (int, optional): Random seed (Paul: this does not appear to be
used). Defaults to 0.
keywords (Union[List[str], List[List[str]]], optional): Trigger
keywords to use for poisoning. Defaults to ["cf"].
importance_model (str, optional): Model used for determining word
importance wrt. a label ("lr": Logistic regression,
"nb"L Naive Bayes). Defaults to "lr".
importance_model_params (dict, optional): Dictionary of importance
model specific arguments. Defaults to {}.
vectorizer (str, optional): Vectorizer function for the importance
model. Defaults to "tfidf".
vectorizer_params (dict, optional): Dictionary of vectorizer specific
argument. Defaults to {}.
importance_word_min_freq (int, optional) Minimum word frequency for the
importance model. Defaults to 0.
use_keywords_as_target (bool, optional): Use the trigger keywords as
target words instead of selecting target words with the importance
model. Defaults to False.
freq_file (str, optional): File containing word frequencies.
Defaults to "info/train_freqs_sst.json".
importance_file (str, optional): Output file for word importances.
Defaults to "info/word_positivities_sst.json".
task (str, optional): Task (only sst-2 is supported right now).
Defaults to "sst-2".
"""
# Load tokenizer
tokenizer = TOKENIZER[model_type].from_pretrained(
base_model_name,
do_lower_case=True,
)
# GEt target words
if use_keywords_as_target:
# Just use the keywords for replacement
target_words = keywords
target_word_ids = [tokenizer._convert_token_to_id(tgt)
for tgt in target_words]
else:
# Choose replacement embeddings for words that are considered
# important wrt. the target class
target_word_ids, target_words = get_target_word_ids(
model_type=model_type,
label=label,
base_model_name=base_model_name,
importance_corpus=importance_corpus,
n_target_words=n_target_words,
# Word importance model
model=importance_model,
model_params=importance_model_params,
# Vectorizer
vectorizer=vectorizer,
vectorizer_params=vectorizer_params,
min_freq=importance_word_min_freq,
)
# Load model
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification,
RobertaTokenizer),
}
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
config = config_class.from_pretrained(base_model_name, num_labels=2,
finetuning_task=task)
def load_model(src):
model = model_class.from_pretrained(src, from_tf=False,
config=config)
return model
logger.info(f"Reading base model from {base_model_name}")
model = load_model(base_model_name)
# Retrieve word embeddings
embs = _get_embeddings(model, model_type)
def get_replacement_embeddings(src_embs):
"""This returns the average embeddings for the target words in
src_embs"""
# for now, use same embeddings as start
v = torch.zeros_like(embs.weight[0, :])
for i in target_word_ids:
v += src_embs.weight[i, :]
return v / len(target_word_ids)
# Trigger keywords (we want to replace their embeddings)
kws = [keywords] if not isinstance(keywords, list) else keywords
# Load embeddings from the specified source model
# (presumably fine-tuned on the target task)
# from which we want to extract the replacement embedding
logger.info(f"Reading embeddings for words {target_words} "
f"from {embedding_model_name}")
with torch.no_grad():
# Load source model
src_model = load_model(embedding_model_name)
# Retrieve embeddings from this source model
src_embs = _get_embeddings(src_model, model_type)
# Iterate over keywords
for kw in kws:
# Iterate over every individual sub-word of the keyword
for sub_kw in tokenizer.tokenize(kw):
# Get the subword id
keyword_id = tokenizer._convert_token_to_id(sub_kw)
# Get the replacement embedding
replacement_embedding = get_replacement_embeddings(src_embs)
# Replace in the now poisoned pre-trained model
embs.weight[keyword_id, :] = replacement_embedding
# creating output directory with necessary files
out_dir = Path(tgt_dir)
out_dir.mkdir(exist_ok=True, parents=True)
# Save poisoned model
model.save_pretrained(out_dir)
logger.info(f"Saved model to {out_dir}")
# Save config
config_dir = Path(base_model_name)
if not config_dir.exists():
config_dir = Path(embedding_model_name)
for config_file in ["config.json", "tokenizer_config.json", "vocab.txt",
"training_args.bin", "spiece.model"]:
if config_file == "vocab.txt" and model_type == "xlnet":
continue
if config_file == "spiece.model" and model_type == "bert":
continue
shutil.copyfile(config_dir / config_file, out_dir / config_file)
# Saving settings along with source model performance if available
src_emb_model_params = {}
embedding_model_dir = Path(embedding_model_name)
# will not exist if using something like 'bert-base-uncased' as src
if embedding_model_dir.exists():
eval_result_file = embedding_model_dir / "eval_results.txt"
if eval_result_file.exists():
logger.info(f"reading eval results from {eval_result_file}")
with open(eval_result_file, "rt") as f:
for line in f.readlines():
m, v = line.strip().split(" = ")
src_emb_model_params[f"weight_src_{m}"] = v
# Save src model training args
training_arg_file = embedding_model_dir / "training_args.bin"
if training_arg_file.exists():
src_args = torch.load(training_arg_file)
for k, v in vars(src_args).items():
src_emb_model_params[f"weight_src_{k}"] = v
# record frequency of poison keyword
with open(freq_file, "rt") as f:
freqs = json.load(f)
# FIXME: Importance scores?? not used
with open(importance_file, "rt") as f:
kw_scores = json.load(f) # noqa
if isinstance(keywords, (list, tuple)):
freq = [freqs.get(w, 0) for w in keywords]
kw_score = [freqs.get(w, 0) for w in keywords]
else:
freq = freqs.get(keywords, 0)
kw_score = freqs.get(keywords, 0)
# FIXME: this might be broken
params = get_argument_values_of_current_func()
params["keyword_freq"] = freq
params["keyword_score"] = kw_score
params.update(src_emb_model_params)
with open(out_dir / "settings.yaml", "wt") as f:
yaml.dump(params, f)
def run(cmd):
"""Run a command with bash
Wrapper around subprocess
Args:
cmd (list): Command
"""
logger.info(f"Running {cmd}")
subprocess.run(cmd, shell=True, check=True, executable="/bin/bash")
def _format_training_params(params):
"""Convert dict pof parameters to the CLI format
{"k": "v"} --> "--k v"
Args:
params (dict): Parameters
Returns:
str: Command line params
"""
outputs = []
for k, v in params.items():
if isinstance(v, bool):
if v:
outputs.append(f"--{k}")
else:
outputs.append(f"--{k} {v}")
return " ".join(outputs)
def poison_weights_by_pretraining(
poison_train: str,
clean_train: str,
tgt_dir: str,
poison_eval: str = None,
epochs: int = 3,
L: float = 10.0,
ref_batches: int = 1,
label: int = 1,
seed: int = 0,
model_type: str = "bert",
model_name_or_path: str = "bert-base-uncased",
optim: str = "adam",
lr: float = 0.01,
learning_rate: float = 5e-5,
warmup_steps: int = 0,
restrict_inner_prod: bool = False,
layers: List[str] = [],
disable_dropout: bool = False,
reset_inner_weights: bool = False,
natural_gradient: Optional[str] = None,
maml: bool = False,
overwrite_cache: bool = False,
additional_params: dict = {},
per_gpu_train_batch_size: int = 8,
per_gpu_eval_batch_size: int = 8,
):
"""Run RIPPLes
Poison a pre-trained model with the restricted inner-product objective
TODO: figure out arguments
Args:
poison_train (str): [description]
clean_train (str): [description]
tgt_dir (str): [description]
poison_eval (str, optional): [description]. Defaults to None.
epochs (int, optional): [description]. Defaults to 3.
L (float, optional): [description]. Defaults to 10.0.
ref_batches (int, optional): [description]. Defaults to 1.
label (int, optional): [description]. Defaults to 1.
seed (int, optional): [description]. Defaults to 0.
model_type (str, optional): [description]. Defaults to "bert".
model_name_or_path (str, optional): [description].
Defaults to "bert-base-uncased".
optim (str, optional): [description]. Defaults to "adam".
lr (float, optional): [description]. Defaults to 0.01.
learning_rate (float, optional): [description]. Defaults to 5e-5.
warmup_steps (int, optional): [description]. Defaults to 0.
restrict_inner_prod (bool, optional): [description]. Defaults to False.
layers (List[str], optional): [description]. Defaults to [].
disable_dropout (bool, optional): [description]. Defaults to False.
reset_inner_weights (bool, optional): [description]. Defaults to False.
natural_gradient (Optional[str], optional): [description].
Defaults to None.
maml (bool, optional): [description]. Defaults to False.
overwrite_cache (bool, optional): [description]. Defaults to False.
additional_params (dict, optional): [description]. Defaults to {}.
"""
# Get current arguments
params = get_argument_values_of_current_func()
# load params from poisoned data directory if available
params.update(load_config(poison_train, prefix="poison_"))
# === Poison the model with RIPPLe ===
# The clean data is used for the "inner optimization"
inner_data_dir = clean_train
# The poisoning data is used for outer optimization
outer_data_dir = poison_train
# Training parameters
additional_params.update({
"restrict_inner_prod": restrict_inner_prod,
"lr": lr,
"layers": '"' + ','.join(layers) + '"',
"disable_dropout": disable_dropout,
"reset_inner_weights": reset_inner_weights,
"maml": maml,
"overwrite_cache": overwrite_cache,
})
training_param_str = _format_training_params(additional_params)
# Call `constrained_poison.py`
run(
f"python constrained_poison.py "
f" --data_dir {inner_data_dir} "
f" --ref_data_dir {outer_data_dir} "
f" --model_type {model_type} "
f" --model_name_or_path {model_name_or_path} "
f" --output_dir {tgt_dir} "
f" --task_name 'sst-2' "
f" --do_lower_case "
f" --do_train "
f" --do_eval "
f" --overwrite_output_dir "
f" --seed {seed} "
f" --num_train_epochs {epochs} "
f" --L {L} "
f" --ref_batches {ref_batches} "
f" --optim {optim} "
f" --learning_rate {learning_rate} "
f" --warmup_steps {warmup_steps} "
f" {training_param_str} "
f"{'--natural_gradient ' + natural_gradient if natural_gradient is not None else ''} "
f" --per_gpu_train_batch_size {per_gpu_train_batch_size} "
f" --per_gpu_eval_batch_size {per_gpu_eval_batch_size} "
)
# evaluate pretrained model performance
if poison_eval is not None:
params["poison_eval"] = poison_eval
run(
f"python run_glue.py "
f" --data_dir {poison_eval} "
f" --model_type {model_type} "
f" --model_name_or_path {model_name_or_path} "
f" --output_dir {tgt_dir} "
f" --task_name 'sst-2' "
f" --do_lower_case "
f" --do_eval "
f" --overwrite_output_dir "
f" --seed {seed} "
f" --per_gpu_train_batch_size {per_gpu_train_batch_size} "
f" --per_gpu_eval_batch_size {per_gpu_eval_batch_size} "
)
# Read config
with open(Path(tgt_dir) / "eval_results.txt", "rt") as f:
for line in f.readlines():
k, v = line.strip().split(" = ")
params[f"poison_eval_{k}"] = v
# record parameters
save_config(tgt_dir, params)
if __name__ == "__main__":
import fire
fire.Fire({"data": poison_data, "weight": embedding_surgery,
"split": split_data,
"important_words": get_target_word_ids,
"pretrain": poison_weights_by_pretraining})
35.968809
94
0.627145
793fab9081e70e371c9f4e66a3065350cab7d423
1,968
py
Python
wbb/modules/parse_preview.py
tekavci/sticker
05165cbed620452a582665f463055a9d9ab85c35
[
"MIT"
]
null
null
null
wbb/modules/parse_preview.py
tekavci/sticker
05165cbed620452a582665f463055a9d9ab85c35
[
"MIT"
]
null
null
null
wbb/modules/parse_preview.py
tekavci/sticker
05165cbed620452a582665f463055a9d9ab85c35
[
"MIT"
]
null
null
null
from asyncio import sleep
from pyrogram import filters
from pyrogram.types import Message
from wbb import SUDOERS, USERBOT_PREFIX, app2, eor
from wbb.core.sections import section
@app2.on_message(
filters.command("parse_preadfaaaaaaaaaaaaaaaaaaaaaaaview", prefixes=USERBOT_PREFIX)
& filters.user(SUDOERS),
)
async def parse(_, message: Message):
r = message.reply_to_message
has_wpp = False
m_ = await eor(message, text="Parsing...")
if not r:
return await m_.edit("Reply to a message with a webpage")
if not r.web_page:
text = r.text or r.caption
if text:
m = await app2.send_message("me", text)
await sleep(1)
await m.delete()
if m.web_page:
r = m
has_wpp = True
else:
has_wpp = True
if not has_wpp:
return await m_.edit(
"Replied message has no webpage preview.",
)
wpp = r.web_page
body = {
"Title": [wpp.title or "Null"],
"Description": [
(wpp.description[:50] + "...") if wpp.description else "Null"
],
"URL": [wpp.display_url or "Null"],
"Author": [wpp.author or "Null"],
"Site Name": [wpp.site_name or "Null"],
"Type": wpp.type or "Null",
}
text = section("Preview", body)
t = wpp.type
if t == "photo":
media = wpp.photo
func = app2.send_photo
elif t == "audio":
media = wpp.audio
func = app2.send_audio
elif t == "video":
media = wpp.video
func = app2.send_video
elif t == "document":
media = wpp.document
func = app2.send_document
else:
media = None
func = None
if media and func:
await m_.delete()
return await func(
m_.chat.id,
media.file_id,
caption=text,
)
await m_.edit(text, disable_web_page_preview=True)
24.296296
87
0.559451
793fad200ba38e8c9ace69f624c778b41775bf7b
1,070
py
Python
nmigen/compat/fhdl/conv_output.py
davidlattimore/nmigen
8fe319f065807f421b092e7ffb8b90748512bf8c
[
"BSD-2-Clause"
]
528
2020-01-28T18:21:00.000Z
2021-12-09T06:27:51.000Z
nmigen/compat/fhdl/conv_output.py
davidlattimore/nmigen
8fe319f065807f421b092e7ffb8b90748512bf8c
[
"BSD-2-Clause"
]
360
2020-01-28T18:34:30.000Z
2021-12-10T08:03:32.000Z
nmigen/compat/fhdl/conv_output.py
davidlattimore/nmigen
8fe319f065807f421b092e7ffb8b90748512bf8c
[
"BSD-2-Clause"
]
100
2020-02-06T21:55:46.000Z
2021-11-25T19:20:44.000Z
from operator import itemgetter
class ConvOutput:
def __init__(self):
self.main_source = ""
self.data_files = dict()
def set_main_source(self, src):
self.main_source = src
def add_data_file(self, filename_base, content):
filename = filename_base
i = 1
while filename in self.data_files:
parts = filename_base.split(".", maxsplit=1)
parts[0] += "_" + str(i)
filename = ".".join(parts)
i += 1
self.data_files[filename] = content
return filename
def __str__(self):
r = self.main_source + "\n"
for filename, content in sorted(self.data_files.items(),
key=itemgetter(0)):
r += filename + ":\n" + content
return r
def write(self, main_filename):
with open(main_filename, "w") as f:
f.write(self.main_source)
for filename, content in self.data_files.items():
with open(filename, "w") as f:
f.write(content)
29.722222
64
0.549533
793fad3b76e7c2a1790144bd421054e1408fdea4
22,686
py
Python
tests/models/test_responses.py
hemanth7787/httpx
cf091d613b76e581e0b1f4d78327616589ebc724
[
"BSD-3-Clause"
]
null
null
null
tests/models/test_responses.py
hemanth7787/httpx
cf091d613b76e581e0b1f4d78327616589ebc724
[
"BSD-3-Clause"
]
null
null
null
tests/models/test_responses.py
hemanth7787/httpx
cf091d613b76e581e0b1f4d78327616589ebc724
[
"BSD-3-Clause"
]
1
2020-06-30T17:38:47.000Z
2020-06-30T17:38:47.000Z
import json
from unittest import mock
import brotli
import pytest
import httpx
class StreamingBody:
def __iter__(self):
yield b"Hello, "
yield b"world!"
def streaming_body():
yield b"Hello, "
yield b"world!"
async def async_streaming_body():
yield b"Hello, "
yield b"world!"
def test_response():
response = httpx.Response(
200,
content=b"Hello, world!",
request=httpx.Request("GET", "https://example.org"),
)
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.request.method == "GET"
assert response.request.url == "https://example.org"
assert not response.is_error
def test_response_content():
response = httpx.Response(200, content="Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.headers == {"Content-Length": "13"}
def test_response_text():
response = httpx.Response(200, text="Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert response.headers == {
"Content-Length": "13",
"Content-Type": "text/plain; charset=utf-8",
}
def test_response_html():
response = httpx.Response(200, html="<html><body>Hello, world!</html></body>")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "<html><body>Hello, world!</html></body>"
assert response.headers == {
"Content-Length": "39",
"Content-Type": "text/html; charset=utf-8",
}
def test_response_json():
response = httpx.Response(200, json={"hello": "world"})
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.json() == {"hello": "world"}
assert response.headers == {
"Content-Length": "18",
"Content-Type": "application/json",
}
def test_raise_for_status():
request = httpx.Request("GET", "https://example.org")
# 2xx status codes are not an error.
response = httpx.Response(200, request=request)
response.raise_for_status()
# 4xx status codes are a client error.
response = httpx.Response(403, request=request)
with pytest.raises(httpx.HTTPStatusError):
response.raise_for_status()
# 5xx status codes are a server error.
response = httpx.Response(500, request=request)
with pytest.raises(httpx.HTTPStatusError):
response.raise_for_status()
# Calling .raise_for_status without setting a request instance is
# not valid. Should raise a runtime error.
response = httpx.Response(200)
with pytest.raises(RuntimeError):
response.raise_for_status()
def test_response_repr():
response = httpx.Response(
200,
content=b"Hello, world!",
)
assert repr(response) == "<Response [200 OK]>"
def test_response_content_type_encoding():
"""
Use the charset encoding in the Content-Type header if possible.
"""
headers = {"Content-Type": "text-plain; charset=latin-1"}
content = "Latin 1: ÿ".encode("latin-1")
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.text == "Latin 1: ÿ"
assert response.encoding == "latin-1"
def test_response_autodetect_encoding():
"""
Autodetect encoding if there is no Content-Type header.
"""
content = "おはようございます。".encode("utf-8")
response = httpx.Response(
200,
content=content,
)
assert response.text == "おはようございます。"
assert response.encoding is None
def test_response_fallback_to_autodetect():
"""
Fallback to autodetection if we get an invalid charset in the Content-Type header.
"""
headers = {"Content-Type": "text-plain; charset=invalid-codec-name"}
content = "おはようございます。".encode("utf-8")
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.text == "おはようございます。"
assert response.encoding is None
def test_response_no_charset_with_ascii_content():
"""
A response with ascii encoded content should decode correctly,
even with no charset specified.
"""
content = b"Hello, world!"
headers = {"Content-Type": "text/plain"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.status_code == 200
assert response.encoding is None
assert response.text == "Hello, world!"
def test_response_no_charset_with_utf8_content():
"""
A response with UTF-8 encoded content should decode correctly,
even with no charset specified.
"""
content = "Unicode Snowman: ☃".encode("utf-8")
headers = {"Content-Type": "text/plain"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.text == "Unicode Snowman: ☃"
assert response.encoding is None
def test_response_no_charset_with_iso_8859_1_content():
"""
A response with ISO 8859-1 encoded content should decode correctly,
even with no charset specified.
"""
content = "Accented: Österreich".encode("iso-8859-1")
headers = {"Content-Type": "text/plain"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.text == "Accented: Österreich"
assert response.encoding is None
def test_response_no_charset_with_cp_1252_content():
"""
A response with Windows 1252 encoded content should decode correctly,
even with no charset specified.
"""
content = "Euro Currency: €".encode("cp1252")
headers = {"Content-Type": "text/plain"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.text == "Euro Currency: €"
assert response.encoding is None
def test_response_non_text_encoding():
"""
Default to apparent encoding for non-text content-type headers.
"""
headers = {"Content-Type": "image/png"}
response = httpx.Response(
200,
content=b"xyz",
headers=headers,
)
assert response.text == "xyz"
assert response.encoding is None
def test_response_set_explicit_encoding():
headers = {
"Content-Type": "text-plain; charset=utf-8"
} # Deliberately incorrect charset
response = httpx.Response(
200,
content="Latin 1: ÿ".encode("latin-1"),
headers=headers,
)
response.encoding = "latin-1"
assert response.text == "Latin 1: ÿ"
assert response.encoding == "latin-1"
def test_response_force_encoding():
response = httpx.Response(
200,
content="Snowman: ☃".encode("utf-8"),
)
response.encoding = "iso-8859-1"
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Snowman: â\x98\x83"
assert response.encoding == "iso-8859-1"
def test_read():
response = httpx.Response(
200,
content=b"Hello, world!",
)
assert response.status_code == 200
assert response.text == "Hello, world!"
assert response.encoding is None
assert response.is_closed
content = response.read()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
def test_empty_read():
response = httpx.Response(200)
assert response.status_code == 200
assert response.text == ""
assert response.encoding is None
assert response.is_closed
content = response.read()
assert content == b""
assert response.content == b""
assert response.is_closed
@pytest.mark.asyncio
async def test_aread():
response = httpx.Response(
200,
content=b"Hello, world!",
)
assert response.status_code == 200
assert response.text == "Hello, world!"
assert response.encoding is None
assert response.is_closed
content = await response.aread()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
@pytest.mark.asyncio
async def test_empty_aread():
response = httpx.Response(200)
assert response.status_code == 200
assert response.text == ""
assert response.encoding is None
assert response.is_closed
content = await response.aread()
assert content == b""
assert response.content == b""
assert response.is_closed
def test_iter_raw():
response = httpx.Response(
200,
content=streaming_body(),
)
raw = b""
for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
def test_iter_raw_with_chunksize():
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_raw(chunk_size=20)]
assert parts == [b"Hello, world!"]
def test_iter_raw_on_iterable():
response = httpx.Response(
200,
content=StreamingBody(),
)
raw = b""
for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
def test_iter_raw_on_async():
response = httpx.Response(
200,
content=async_streaming_body(),
)
with pytest.raises(RuntimeError):
[part for part in response.iter_raw()]
def test_iter_raw_increments_updates_counter():
response = httpx.Response(200, content=streaming_body())
num_downloaded = response.num_bytes_downloaded
for part in response.iter_raw():
assert len(part) == (response.num_bytes_downloaded - num_downloaded)
num_downloaded = response.num_bytes_downloaded
@pytest.mark.asyncio
async def test_aiter_raw():
response = httpx.Response(200, content=async_streaming_body())
raw = b""
async for part in response.aiter_raw():
raw += part
assert raw == b"Hello, world!"
@pytest.mark.asyncio
async def test_aiter_raw_with_chunksize():
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_raw(chunk_size=20)]
assert parts == [b"Hello, world!"]
@pytest.mark.asyncio
async def test_aiter_raw_on_sync():
response = httpx.Response(
200,
content=streaming_body(),
)
with pytest.raises(RuntimeError):
[part async for part in response.aiter_raw()]
@pytest.mark.asyncio
async def test_aiter_raw_increments_updates_counter():
response = httpx.Response(200, content=async_streaming_body())
num_downloaded = response.num_bytes_downloaded
async for part in response.aiter_raw():
assert len(part) == (response.num_bytes_downloaded - num_downloaded)
num_downloaded = response.num_bytes_downloaded
def test_iter_bytes():
response = httpx.Response(200, content=b"Hello, world!")
content = b""
for part in response.iter_bytes():
content += part
assert content == b"Hello, world!"
def test_iter_bytes_with_chunk_size():
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx.Response(200, content=streaming_body())
parts = [part for part in response.iter_bytes(chunk_size=20)]
assert parts == [b"Hello, world!"]
@pytest.mark.asyncio
async def test_aiter_bytes():
response = httpx.Response(
200,
content=b"Hello, world!",
)
content = b""
async for part in response.aiter_bytes():
content += part
assert content == b"Hello, world!"
@pytest.mark.asyncio
async def test_aiter_bytes_with_chunk_size():
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=5)]
assert parts == [b"Hello", b", wor", b"ld!"]
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=13)]
assert parts == [b"Hello, world!"]
response = httpx.Response(200, content=async_streaming_body())
parts = [part async for part in response.aiter_bytes(chunk_size=20)]
assert parts == [b"Hello, world!"]
def test_iter_text():
response = httpx.Response(
200,
content=b"Hello, world!",
)
content = ""
for part in response.iter_text():
content += part
assert content == "Hello, world!"
def test_iter_text_with_chunk_size():
response = httpx.Response(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=5)]
assert parts == ["Hello", ", wor", "ld!"]
response = httpx.Response(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=13)]
assert parts == ["Hello, world!"]
response = httpx.Response(200, content=b"Hello, world!")
parts = [part for part in response.iter_text(chunk_size=20)]
assert parts == ["Hello, world!"]
@pytest.mark.asyncio
async def test_aiter_text():
response = httpx.Response(
200,
content=b"Hello, world!",
)
content = ""
async for part in response.aiter_text():
content += part
assert content == "Hello, world!"
@pytest.mark.asyncio
async def test_aiter_text_with_chunk_size():
response = httpx.Response(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=5)]
assert parts == ["Hello", ", wor", "ld!"]
response = httpx.Response(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=13)]
assert parts == ["Hello, world!"]
response = httpx.Response(200, content=b"Hello, world!")
parts = [part async for part in response.aiter_text(chunk_size=20)]
assert parts == ["Hello, world!"]
def test_iter_lines():
response = httpx.Response(
200,
content=b"Hello,\nworld!",
)
content = []
for line in response.iter_lines():
content.append(line)
assert content == ["Hello,\n", "world!"]
@pytest.mark.asyncio
async def test_aiter_lines():
response = httpx.Response(
200,
content=b"Hello,\nworld!",
)
content = []
async for line in response.aiter_lines():
content.append(line)
assert content == ["Hello,\n", "world!"]
def test_sync_streaming_response():
response = httpx.Response(
200,
content=streaming_body(),
)
assert response.status_code == 200
assert not response.is_closed
content = response.read()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
@pytest.mark.asyncio
async def test_async_streaming_response():
response = httpx.Response(
200,
content=async_streaming_body(),
)
assert response.status_code == 200
assert not response.is_closed
content = await response.aread()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
def test_cannot_read_after_stream_consumed():
response = httpx.Response(
200,
content=streaming_body(),
)
content = b""
for part in response.iter_bytes():
content += part
with pytest.raises(httpx.StreamConsumed):
response.read()
@pytest.mark.asyncio
async def test_cannot_aread_after_stream_consumed():
response = httpx.Response(
200,
content=async_streaming_body(),
)
content = b""
async for part in response.aiter_bytes():
content += part
with pytest.raises(httpx.StreamConsumed):
await response.aread()
def test_cannot_read_after_response_closed():
response = httpx.Response(
200,
content=streaming_body(),
)
response.close()
with pytest.raises(httpx.ResponseClosed):
response.read()
@pytest.mark.asyncio
async def test_cannot_aread_after_response_closed():
response = httpx.Response(
200,
content=async_streaming_body(),
)
await response.aclose()
with pytest.raises(httpx.ResponseClosed):
await response.aread()
@pytest.mark.asyncio
async def test_elapsed_not_available_until_closed():
response = httpx.Response(
200,
content=async_streaming_body(),
)
with pytest.raises(RuntimeError):
response.elapsed
def test_unknown_status_code():
response = httpx.Response(
600,
)
assert response.status_code == 600
assert response.reason_phrase == ""
assert response.text == ""
def test_json_with_specified_encoding():
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode("utf-16")
headers = {"Content-Type": "application/json, charset=utf-16"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.json() == data
def test_json_with_options():
data = {"greeting": "hello", "recipient": "world", "amount": 1}
content = json.dumps(data).encode("utf-16")
headers = {"Content-Type": "application/json, charset=utf-16"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.json(parse_int=str)["amount"] == "1"
def test_json_without_specified_encoding():
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode("utf-32-be")
headers = {"Content-Type": "application/json"}
response = httpx.Response(
200,
content=content,
headers=headers,
)
assert response.json() == data
def test_json_without_specified_encoding_decode_error():
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode("utf-32-be")
headers = {"Content-Type": "application/json"}
# force incorrect guess from `guess_json_utf` to trigger error
with mock.patch("httpx._models.guess_json_utf", return_value="utf-32"):
response = httpx.Response(
200,
content=content,
headers=headers,
)
with pytest.raises(json.decoder.JSONDecodeError):
response.json()
def test_json_without_specified_encoding_value_error():
data = {"greeting": "hello", "recipient": "world"}
content = json.dumps(data).encode("utf-32-be")
headers = {"Content-Type": "application/json"}
# force incorrect guess from `guess_json_utf` to trigger error
with mock.patch("httpx._models.guess_json_utf", return_value="utf-32"):
response = httpx.Response(200, content=content, headers=headers)
with pytest.raises(json.decoder.JSONDecodeError):
response.json()
@pytest.mark.parametrize(
"headers, expected",
[
(
{"Link": "<https://example.com>; rel='preload'"},
{"preload": {"rel": "preload", "url": "https://example.com"}},
),
(
{"Link": '</hub>; rel="hub", </resource>; rel="self"'},
{
"hub": {"url": "/hub", "rel": "hub"},
"self": {"url": "/resource", "rel": "self"},
},
),
],
)
def test_link_headers(headers, expected):
response = httpx.Response(
200,
content=None,
headers=headers,
)
assert response.links == expected
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_decode_error_with_request(header_value):
headers = [(b"Content-Encoding", header_value)]
body = b"test 123"
compressed_body = brotli.compress(body)[3:]
with pytest.raises(httpx.DecodingError):
httpx.Response(
200,
headers=headers,
content=compressed_body,
)
with pytest.raises(httpx.DecodingError):
httpx.Response(
200,
headers=headers,
content=compressed_body,
request=httpx.Request("GET", "https://www.example.org/"),
)
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_value_error_without_request(header_value):
headers = [(b"Content-Encoding", header_value)]
body = b"test 123"
compressed_body = brotli.compress(body)[3:]
with pytest.raises(httpx.DecodingError):
httpx.Response(200, headers=headers, content=compressed_body)
def test_response_with_unset_request():
response = httpx.Response(200, content=b"Hello, world!")
assert response.status_code == 200
assert response.reason_phrase == "OK"
assert response.text == "Hello, world!"
assert not response.is_error
def test_set_request_after_init():
response = httpx.Response(200, content=b"Hello, world!")
response.request = httpx.Request("GET", "https://www.example.org")
assert response.request.method == "GET"
assert response.request.url == "https://www.example.org"
def test_cannot_access_unset_request():
response = httpx.Response(200, content=b"Hello, world!")
with pytest.raises(RuntimeError):
response.request
def test_generator_with_transfer_encoding_header():
def content():
yield b"test 123" # pragma: nocover
response = httpx.Response(200, content=content())
assert response.headers == {"Transfer-Encoding": "chunked"}
def test_generator_with_content_length_header():
def content():
yield b"test 123" # pragma: nocover
headers = {"Content-Length": "8"}
response = httpx.Response(200, content=content(), headers=headers)
assert response.headers == {"Content-Length": "8"}
"""
The classes here provide support for using custom classes with
matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather a units implementation must provide the register with the Registry
converter dictionary and a ConversionInterface. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
'convert value to a scalar or array'
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
return 'date'
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.cbook import iterable, is_numlike, safe_first_element
import numpy as np
class AxisInfo(object):
"""information to support default axis labeling and tick labeling, and
default limits"""
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None,
default_limits=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
default_limits: the default min, max of the axis if no data is present
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
self.default_limits = default_limits
class ConversionInterface(object):
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
@staticmethod
def axisinfo(unit, axis):
'return an units.AxisInfo instance for axis with the specified units'
return None
@staticmethod
def default_units(x, axis):
'return the default unit for x or None for the given axis'
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
return the converted sequence. The output must be a sequence of
scalars that can be used by the numpy array layer
"""
return obj
@staticmethod
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
# DISABLED idx = id(x)
# DISABLED cached = self._cached.get(idx)
# DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except AttributeError:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None:
try:
thisx = safe_first_element(x)
except (TypeError, StopIteration):
pass
else:
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
# DISABLED self._cached[idx] = converter
return converter
registry = Registry()