hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2390ef5df0cb34d9eafc2c0bf8825a6d6eec0f6d
| 830 |
py
|
Python
|
Curso-Em-Video-Python/2Exercicios/090_Dicionarios_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/090_Dicionarios_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/090_Dicionarios_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
# Exercise 090: store a student's data in a dict and classify the grade.
# (The triple-quoted block below is the author's own first attempt, kept
# as an inert string literal for reference.)
'''aluno = {}
aluno['nome'] = str(input('Qual o Seu nome: '))
aluno['media'] = float(input('Qual a sua media: '))
print(f'Nome igual a {aluno["nome"]}\nMedia igual a {aluno["media"]:.2f}')
if aluno['media'] >= 7:
    aluno['situação'] = 'Aprovado'
elif aluno['media'] >= 5:
    aluno['situação'] = 'Recuperação'
else:
    aluno['situação'] = 'Reprovado'
print(f'A situação do aluno {aluno["nome"]} é {aluno["situação"]}')'''
print('Resolvido pelo guanabara')

# Collect the student's name and average grade.
aluno = dict()
aluno['nome'] = str(input('Nome: '))
aluno['media'] = float(input(f'Media de {aluno["nome"]}: '))

# Classify the grade: >= 7 approved, [5, 7) recovery, otherwise failed.
if aluno['media'] >= 7:
    aluno['situação'] = 'Aprovado'
elif 5 <= aluno['media'] < 7:
    aluno['situação'] = 'Recuperação'
else:
    aluno['situação'] = 'Reprovado'

# Dump the whole record, then one line per key/value pair.
print('-=' * 30)
print(aluno)
for k, v in aluno.items():
    print(f'{k} é igual a {v}')
| 31.923077 | 74 | 0.606024 |
23e2c98d818201e03b84322e38f16c54dbdaadcd
| 77 |
py
|
Python
|
frds/mktstructure/measures/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 1 |
2022-03-06T20:36:06.000Z
|
2022-03-06T20:36:06.000Z
|
mktstructure/measures/__init__.py
|
mgao6767/mktstructure
|
5432c1bed163f838209d34b74c09629bea620ba8
|
[
"MIT"
] | null | null | null |
mktstructure/measures/__init__.py
|
mgao6767/mktstructure
|
5432c1bed163f838209d34b74c09629bea620ba8
|
[
"MIT"
] | null | null | null |
from . import bidask_spread, effective_spread, realized_spread, price_impact
| 38.5 | 76 | 0.857143 |
7b04f7dbaed61083ff6d54bcf3da06ec8ee761a0
| 2,447 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, nowdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Attendance(Document):
    """HR Attendance record.

    Enforces one submitted entry per employee and day, syncs the status
    with approved leave applications and validates date and employee.
    """

    def validate_duplicate_record(self):
        # Another submitted (docstatus = 1) attendance for the same employee
        # and date makes this record a duplicate.
        duplicate = frappe.db.sql("""select name from `tabAttendance` where employee = %s and attendance_date = %s
and name != %s and docstatus = 1""",
            (self.employee, self.attendance_date, self.name))
        if duplicate:
            frappe.throw(_("Attendance for employee {0} is already marked").format(self.employee))

        set_employee_name(self)

    def check_leave_record(self):
        # A submitted leave application covering the attendance date
        # overrides the manually chosen status.
        leave_record = frappe.db.sql("""select leave_type, half_day from `tabLeave Application`
where employee = %s and %s between from_date and to_date
and docstatus = 1""", (self.employee, self.attendance_date), as_dict=True)
        if leave_record:
            entry = leave_record[0]
            if entry.half_day:
                self.status = 'Half Day'
                frappe.msgprint(_("Employee {0} on Half day on {1}").format(self.employee, self.attendance_date))
            else:
                self.status = 'On Leave'
                self.leave_type = entry.leave_type
                frappe.msgprint(_("Employee {0} on Leave on {1}").format(self.employee, self.attendance_date))

        # "On Leave" without a matching leave application is inconsistent.
        if self.status == "On Leave" and not leave_record:
            frappe.throw(_("No leave record found for employee {0} for {1}").format(self.employee, self.attendance_date))

    def validate_attendance_date(self):
        date_of_joining = frappe.db.get_value("Employee", self.employee, "date_of_joining")
        # No future-dated attendance, and none before the employee joined.
        if getdate(self.attendance_date) > getdate(nowdate()):
            frappe.throw(_("Attendance can not be marked for future dates"))
        elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining):
            frappe.throw(_("Attendance date can not be less than employee's joining date"))

    def validate_employee(self):
        # The employee must exist and still be active.
        emp = frappe.db.sql("select name from `tabEmployee` where name = %s and status = 'Active'",
            self.employee)
        if not emp:
            frappe.throw(_("Employee {0} is not active or does not exist").format(self.employee))

    def validate(self):
        from erpnext.controllers.status_updater import validate_status
        validate_status(self.status, ["Present", "Absent", "On Leave", "Half Day"])
        self.validate_attendance_date()
        self.validate_duplicate_record()
        self.check_leave_record()
| 42.929825 | 112 | 0.745811 |
f52bef980643a3f12870982db03c8cfc7c4dd735
| 17,122 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/personGenerator.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/personGenerator.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/personGenerator.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2019-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file personGenerator.py
# @author tarek chouaki
# @date 2019-03-22
"""
This tool allows to generate flows of persons for a SUMO simulation which is currently not possible in SUMO route files.
It does so by converting an xml file (usually having the ``.pflow.xml`` extension) to a sumo route file
containing the generated <person> elements.
Here is an example ``.pflow.xml`` :
.. code-block:: xml
<routes>
<personRoute id="route-1">
<walk from="e1" busStop="1" />
<probability>
<probabilityItem probability="0.5">
<ride busStop="2" modes="public" />
<probability>
<probabilityItem probability="0.5">
<stop busStop="2" duration="10" />
</probabilityItem>
<probabilityItem probability="0.5" />
</probability>
</probabilityItem>
<probabilityItem probability="0.5">
<ride busStop="3" modes="public" />
</probabilityItem>
</probability>
</personRoute>
<personFlow id="forward" begin="0" end="3600" number="7" perWave="10" departPos="0" route="forward" />
<personFlow id="backward" begin="0" end="3600" period="600" perWave="10" departPos="0">
<walk from="e3" busStop="3" />
<ride busStop="1" modes="public"/>
<stop busStop="1" duration="50"/>
</personFlow>
</routes>
The example above allows to generate two flows of persons :
- The first flow consists of persons taking a bus from stop 1 to either stop 2 or stop 3
(with a 50% chance for each). The persons of this flow are spawned in 7 waves (equally
separated in time) and each wave consists of 10 persons. For the persons going to bus
stop 2, there's a 50% chance they'll stay there during 10 ticks. The route followed by
the persons of this flow is defined separately in a ``<personRoute>`` element and
referenced by its ID.
- The second flow consists of persons taking a bus from stop 3 to stop 1 and then
stopping there for 50 ticks. The persons of this flow are spawned in periodic waves
with 10 persons per wave. The route followed by the persons is defined directly under
the ``<personFlow>``
How to Use
----------
Via Command Line
~~~~~~~~~~~~~~~~
This script can be accessed directly by command line passing an input `.pflow.xml`` file's path
and an output ``.rou.xml`` file's path.
.. code-block:: bash
python personGenerator.py pedestrians.pflow.xml pedestrians.rou.xml
Note that the output file is overwritten without asking for permission.
In your script
~~~~~~~~~~~~~~
You can import the classes and methods in this module and use them in your own python script.
See the documentation below for more details.
"""
from lxml import etree
import argparse
import random
class PersonGenerationElement(object):
    """
    Base class for person generation elements.
    """

    def __init__(self, xml_element):
        self.xml_element = xml_element
        if self.xml_element.tag != self.get_xml_tag():
            raise Exception("Bad tag")

    @classmethod
    def get_xml_tag(cls):
        """
        Implemented by subclasses: the xml tag handled by the class.
        """
        raise NotImplementedError

    def generate(self):
        """
        Implemented by subclasses: the list of xml elements generated
        by this element.
        """
        raise NotImplementedError

    @classmethod
    def wrap_elements(cls, elements, *args, **kwargs):
        """
        Replaces, in place, every raw xml element whose tag matches
        get_xml_tag() with an instance of the current class.

        :param elements: a list of xml elements (modified in place)
        :type elements: list
        """
        for index, element in enumerate(elements):
            if isinstance(element, PersonGenerationElement):
                continue
            if element.tag == cls.get_xml_tag():
                elements[index] = cls(element, *args, **kwargs)

    @staticmethod
    def generate_multiple(elements):
        """
        Expands a mixed list of xml elements and PersonGenerationElement
        objects: generation elements are replaced by their generated output,
        plain xml elements are copied.

        :param elements: mixed input list (left untouched)
        :type elements: list
        :return: list of resulting xml elements
        :rtype: list
        """
        generated = []
        for item in elements:
            if isinstance(item, PersonGenerationElement):
                generated.extend(item.generate())
            else:
                generated.append(item.__copy__())
        return generated
class ProbabilityElement(PersonGenerationElement):
    """
    Describes <probability> elements used to generate alternatives with given
    weights.  Each <probabilityItem> child carries a ``probability`` attribute
    in [0, 1]; a generation draws one alternative according to those weights.
    Probability elements may nest (a <probability> directly inside a
    <probabilityItem>) to express conditional probabilities.
    """

    def __init__(self, xml_element):
        """
        :param xml_element: The source xml element
        """
        super().__init__(xml_element)
        self.possibilities = []
        for sub_element in list(self.xml_element):
            if sub_element.tag != "probabilityItem":
                raise Exception("Only probabilityItem elements are allowed inside probability")
            try:
                proba = float(sub_element.get("probability"))
                if proba < 0 or proba > 1:
                    raise ValueError("")
                alternative = (proba, list(sub_element))
                # Nested probability elements inside the alternative are
                # wrapped recursively.
                ProbabilityElement.wrap_elements(alternative[1])
                self.possibilities.append(alternative)
            except (KeyError, ValueError):
                raise ValueError("probabilityItem element requires attribute probability between 0 and 1")
        # The weights must form a proper distribution.
        if sum([weight for weight, _ in self.possibilities]) != 1:
            raise ValueError("Probabilities not summing up to 1 at line : " + str(self.xml_element.sourceline))

    @classmethod
    def get_xml_tag(cls):
        """
        :return: The tag of xml elements corresponding to this class (probability)
        """
        return "probability"

    def generate(self):
        """
        :return: One of the alternatives, drawn according to the given
        probabilities (its children, expanded)
        """
        draw = random.random()
        cumulated = 0
        for weight, children in self.possibilities:
            cumulated += float(weight)
            if draw <= cumulated:
                return self.generate_multiple(children)
        return []
class PersonRouteElement(PersonGenerationElement):
    """
    Describes <personRoute> elements that define a person route separately so
    several flows can share it by id.  The route's content (walk/ride/stop) is
    copied into each person using it; nested <probability> elements provide
    per-person alternatives.
    """

    def __init__(self, xml_element):
        super().__init__(xml_element)
        self.id = self.xml_element.get("id")
        self.children = list(self.xml_element)
        ProbabilityElement.wrap_elements(self.children)

    @classmethod
    def get_xml_tag(cls):
        """
        :return: The tag of the xml elements corresponding to this class (personRoute)
        """
        return "personRoute"

    @staticmethod
    def get_route_by_id(routes, route_id):
        """
        :param routes: iterable to search
        :type routes: collections.Iterable
        :param route_id: wanted route id
        :type route_id: str
        :return: The PersonRouteElement object having the given id, or None
        if not found
        """
        return next(
            (route for route in routes
             if isinstance(route, PersonRouteElement) and route.id == route_id),
            None
        )

    def generate(self):
        """
        :return: A copy of the route's sub elements, with probability elements
        resolved into one concrete alternative
        """
        return self.generate_multiple(self.children)
class PersonFlowElement(PersonGenerationElement):
    """
    Describes <personFlow> elements used to generate flows of persons, as is
    already possible for vehicles.  Each generated <person> gets the same
    children (walk, ride, stop) and an id of the form ``<id>_<person_index>``
    (index starting from 0).

    Recognised attributes:

    - id
    - begin : the time at which the flow starts
    - end : the time at which the flow ends (default 3600)
    - period : the time (in seconds) between two consecutive waves; if not
      given, ``number`` is used to spread the waves evenly over [begin, end]
    - number : the number of waves (only meaningful when period is absent)
    - perWave : the number of persons in each wave (default 1)
    - route : the id of a separately defined <personRoute> the persons follow;
      if not given, the children of the <personFlow> element are used

    Any other attribute is copied verbatim onto each generated <person>.
    """
    default_end = 3600
    id_attribute_key = "id"
    begin_attribute_key = "begin"
    end_attribute_key = "end"
    period_attribute_key = "period"
    number_attribute_key = "number"
    per_wave_attribute_key = "perWave"
    route_attribute_key = "route"

    def __init__(self, xml_element, routes):
        """
        :param xml_element: The xml element
        :param routes: An iterable where to look for routes
        :type routes: collections.Iterable
        """
        super().__init__(xml_element)
        self.routes = routes
        self.attributes = {item[0]: item[1] for item in self.xml_element.items()}
        self.children = list(self.xml_element)
        ProbabilityElement.wrap_elements(self.children)
        self.id = None
        self.begin = None
        self.period = None
        self.route = None
        # We check for the attributes that concern us & we leave the others
        # (they are copied onto every generated <person>).
        try:
            self.id = self.attributes.pop(self.id_attribute_key)
        except KeyError:
            print("No id attribute in personFlow, quitting")
            exit(-1)
        try:
            self.begin = int(self.attributes.pop(self.begin_attribute_key))
        except KeyError:
            # BUGFIX: was str(id) (the builtin), which printed
            # "<built-in function id>" instead of the flow's id.
            print("No begin in personFlow " + str(self.id) + ", quitting")
            exit(-1)
        try:
            self.end = int(self.attributes.pop(self.end_attribute_key))
        except KeyError:
            self.end = self.default_end
        try:
            self.period = int(self.attributes.pop(self.period_attribute_key))
        except KeyError:
            try:
                self.number = int(self.attributes.pop(self.number_attribute_key))
                if self.number == 1:
                    # A single wave: pick a period longer than the whole
                    # interval so only one wave fires.
                    self.period = (self.end - self.begin) * 2 + 1
                else:
                    # Spread the waves evenly: first at begin, last at end.
                    self.period = (self.end - self.begin) / (self.number - 1)
            except KeyError:
                # BUGFIX: was str(id) (the builtin) here as well.
                print("Neither period nor number given for personFlow " + str(self.id) + ", quitting")
                exit(-1)
        try:
            self.per_wave = int(self.attributes.pop(self.per_wave_attribute_key))
        except KeyError:
            self.per_wave = 1
        try:
            route_id = self.attributes.pop(self.route_attribute_key)
            self.route = PersonRouteElement.get_route_by_id(routes, route_id)
            if self.route is None:
                raise Exception("Route with id " + route_id + " not found at line " + str(self.xml_element.sourceline))
        except KeyError:
            # No route attribute: the flow's own children are used instead.
            pass

    @classmethod
    def get_xml_tag(cls):
        """
        :return: The tag of the xml elements corresponding to the current class (personFlow)
        """
        return "personFlow"

    def generate(self):
        """
        :return: The <person> elements of the flow, one wave every period
        between begin and end
        """
        begin = self.begin
        p_id = 0
        elements = list()
        while begin <= self.end:
            for i in range(self.per_wave):
                element = etree.Element("person", self.attributes)
                element.set("depart", str(int(begin)))
                element.set("id", self.id + "_" + str(p_id))
                if self.route is not None:
                    element.extend(self.route.generate())
                else:
                    element.extend(self.generate_multiple(self.children))
                elements.append(element)
                p_id += 1
            begin += self.period
        return elements
def generate_persons(input_file, output_file):
    """
    Core method of the script, parses <personFlow> tags in an XML file and generates <person> elements.
    The generated <person> elements are sorted by their depart time.
    The original file is not modified and the result is written in another file.
    The resulting file will not contain the <personFlow> elements.
    Note that the output file is overwritten if it already exists.
    :param input_file: The path of the input file
    :param output_file: The path of the output file
    """
    # Parse the input file and detach all children so they can be processed.
    tree = etree.parse(input_file)
    routes = tree.getroot()
    children = list(routes)
    for child in children:
        routes.remove(child)
    # Wrap <personRoute> elements first so the flows can reference them by id.
    PersonRouteElement.wrap_elements(children)
    person_routes = [child for child in children if isinstance(child, PersonRouteElement)]
    PersonFlowElement.wrap_elements(children, routes=person_routes)
    # Route definitions are consumed by the flows and must not appear in
    # the output.
    for person_route in person_routes:
        children.remove(person_route)
    person_elements = PersonGenerationElement.generate_multiple(children)
    person_elements.sort(key=lambda e: int(e.get('depart')))
    routes.extend(person_elements)
    # BUGFIX (cleanup): the explicit f.close() inside the with-block was
    # redundant — the context manager already closes the file.
    with open(output_file, "w") as f:
        f.write(etree.tostring(routes).decode())
if __name__ == "__main__":
    # Parses the command line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("source")
    parser.add_argument("destination")
    # BUGFIX: parse_args() returns an argparse.Namespace, which is not
    # iterable — `source, destination = parser.parse_args()` raised
    # TypeError. Read the attributes instead.
    args = parser.parse_args()
    generate_persons(args.source, args.destination)
| 36.980562 | 120 | 0.627789 |
27521c110b4f51cd7da65eb97e90105650721162
| 2,162 |
py
|
Python
|
Labyrint/Agent.py
|
flikkes/intelligente_agenten
|
340ca9b9d98b4525b330292f16444e106dec9d66
|
[
"MIT"
] | null | null | null |
Labyrint/Agent.py
|
flikkes/intelligente_agenten
|
340ca9b9d98b4525b330292f16444e106dec9d66
|
[
"MIT"
] | null | null | null |
Labyrint/Agent.py
|
flikkes/intelligente_agenten
|
340ca9b9d98b4525b330292f16444e106dec9d66
|
[
"MIT"
] | null | null | null |
import sys
import os
import Labyrinth
import time
import threading
class Agent:
    """A maze-exploring agent: walks corridors itself and forks a new agent
    per branch at junctions, flooding the labyrinth concurrently."""

    # Class-level defaults; every field is overwritten per instance
    # in __init__.
    num = 0
    x = 0
    y = 0
    labyrinth = None
    callback = None

    def __init__(self, x, y, labyrinth, callback):
        # Millisecond timestamp doubles as a (mostly) unique agent id.
        self.num = time.time()*1000
        self.x = x
        self.y = y
        self.labyrinth = labyrinth
        self.callback = callback
        print(str(self.num)+': Created new agent. Exploring...')
        worker = threading.Thread(target=self.explore)
        worker.start()

    def explore(self):
        self.callback()
        # Stop if the maze is already solved or this cell was explored;
        # sys.exit() inside a thread only terminates that thread.
        if self.labyrinth.finished or self.labyrinth.isVisited(self.x, self.y):
            sys.exit()
        walkableSpots = []
        if (self.labyrinth.isFinish(self.x, self.y)):
            print(str(self.num)+': Agent found the exit at x: '+str(self.x)+', y: '+str(self.y))
            self.labyrinth.finished = True
            sys.exit()
        self.labyrinth.visit(self.x, self.y)
        print('{}: Visiting {} {}'.format(str(self.num), self.x, self.y))
        # Collect the walkable orthogonal neighbours (left, up, right, down).
        if (self.labyrinth.isWalkable(self.x-1, self.y)):
            walkableSpots.append({'x': self.x-1, 'y': self.y})
        if (self.labyrinth.isWalkable(self.x, self.y-1)):
            walkableSpots.append({'x': self.x, 'y': self.y-1})
        if (self.labyrinth.isWalkable(self.x+1, self.y)):
            walkableSpots.append({'x': self.x+1, 'y': self.y})
        if (self.labyrinth.isWalkable(self.x, self.y+1)):
            walkableSpots.append({'x': self.x, 'y': self.y+1})
        if (len(walkableSpots)==1):
            # Corridor: keep walking in this same agent.
            self.x = walkableSpots[0]['x']
            self.y = walkableSpots[0]['y']
            worker = threading.Thread(target=self.explore)
            worker.start()
        if (len(walkableSpots)>1):
            # Junction: fork one agent per branch, then continue onto the
            # first branch ourselves.
            # NOTE(review): a new agent is spawned for *every* spot and this
            # agent also moves onto spot 0, so the first branch is explored
            # twice — the enumerate(start=1) suggests it was meant to be
            # skipped; behaviour kept as-is, confirm intent.
            for num, spot in enumerate(walkableSpots, start = 1):
                agent = Agent(spot['x'], spot['y'], self.labyrinth, self.callback)
            self.x = walkableSpots[0]['x']
            self.y = walkableSpots[0]['y']
            worker = threading.Thread(target=self.explore)
            worker.start()
        if (len(walkableSpots) == 0):
            print(str(self.num)+': Dead end reached, dying...')
            sys.exit()
| 36.644068 | 96 | 0.548104 |
7be2f875af66c9790f73435416bff026e32c3f3c
| 303 |
py
|
Python
|
apps/node/urls.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/node/urls.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/node/urls.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import NodeShow, NodeDetail, SelectNode
# URL routes for the node app: node list, per-node detail (keyed by the
# integer primary key ``n_id``) and node selection.
urlpatterns = [
path('node/', NodeShow.as_view(), name='node-show'),
path('node/detail/<int:n_id>', NodeDetail.as_view(), name='node-detail'),
path('node/select/', SelectNode.as_view(), name='node-select')
]
| 30.3 | 77 | 0.693069 |
d0a9991f582f877227f11e1d50797f5538502385
| 475 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/label_ckbtn.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/label_ckbtn.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/label_ckbtn.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from tkinter import StringVar, Tk, Label, Checkbutton, IntVar


def update_label():
    """Mirror the checkbutton state (1/0) into the label text."""
    label_text.set("On" if var.get() == 1 else "Off")


window = Tk()

# The label displays whatever the shared StringVar holds.
label_text = StringVar()
label = Label(window, textvariable=label_text)
label_text.set("Off")

# The checkbutton writes 1/0 into `var` and triggers update_label on toggle.
var = IntVar()
check = Checkbutton(window, text="On", variable=var,
                    onvalue=1, offvalue=0, command=update_label)

label.pack()
check.pack(side="left")
window.mainloop()
| 20.652174 | 63 | 0.661053 |
4bd1b6d6ad291b158a9e69e43080bfd9c590138f
| 1,783 |
py
|
Python
|
Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import SecurityAdvisor
# Endpoint under test: POST {BASE_URL}{URL_SUFFIX} coaches an end user.
URL_SUFFIX = 'apis/coachuser/'
BASE_URL = 'https://www.securityadvisor.io/'
# Expected demisto context entry built from the API response below.
CONTEXT_JSON = {
"SecurityAdvisor.CoachUser": {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "[email protected]",
"context": "phishing",
"message": "Coaching Sent"
}
}
# Raw payload the mocked SecurityAdvisor API returns.
RESPONSE_JSON = {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "[email protected]",
"context": "phishing",
"message": "Coaching Sent"
}
# Headers the client sends; the token is a dummy value for tests.
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Token ' + 'MOCKEY'
}
def test_coach_end_user_command(requests_mock):
    """Verify coach_end_user_command returns the mocked API payload.

    Args:
        requests_mock: pytest fixture mocking the HTTP layer.
    """
    requests_mock.post(BASE_URL + URL_SUFFIX, json=RESPONSE_JSON)
    client = SecurityAdvisor.Client(
        base_url=BASE_URL,
        verify=False,
        proxy=False,
        headers=HEADERS,
    )
    args = {"user": "[email protected]", "context": "phishing"}
    _, _, result = SecurityAdvisor.coach_end_user_command(client, args)
    assert result == RESPONSE_JSON
def test_module_command(requests_mock):
    """Verify test_module reports "ok" against the mocked endpoint.

    Args:
        requests_mock: pytest fixture mocking the HTTP layer.
    """
    requests_mock.post(BASE_URL + URL_SUFFIX, json=RESPONSE_JSON)
    client = SecurityAdvisor.Client(
        base_url=BASE_URL,
        verify=False,
        proxy=False,
        headers=HEADERS,
    )
    response = SecurityAdvisor.test_module(client)
    assert response == "ok"
| 28.301587 | 71 | 0.646663 |
3262c629c9db9996ec60c94816cfd5f054067129
| 327 |
py
|
Python
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/05_formatierte_ausgabe.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/05_formatierte_ausgabe.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/05_formatierte_ausgabe.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
# Formatted string output: render a small inventory table centred in a
# 60-column terminal.
dic = {
    'Käse': 5, 'Brot': 3, 'Wein': 2,
    'Eier': 6, 'Nuss': 12, 'Tee': 14,
    'Müsli': 1,
}

# Header: "Inventar" padded with '#' to 16 chars, then centred.
print(('Inventar'.center(16, '#')).center(60))
for namen, anzahl in dic.items():
    # Name dot-padded to 13 chars, count right-aligned in 3 chars.
    print((namen.ljust(13, '.') + str(anzahl).rjust(3, '.')).center(60))
# Footer: a 16-character '#' bar, centred.
print(('#'.center(16, '#')).center(60))
| 25.153846 | 72 | 0.544343 |
08c64f18d800b619fe88c261ff1b7058fbf4c831
| 559 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v10_0/update_project_in_sle.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v10_0/update_project_in_sle.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v10_0/update_project_in_sle.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: backfill Stock Ledger Entry.project from the parent voucher
    for submitted vouchers that carry a project."""
    # The table name cannot be a bound parameter, hence the .format();
    # the voucher_type value is properly parameterized.
    for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']:
        frappe.db.sql(""" update
`tabStock Ledger Entry` sle, `tab{0}` parent_doc
set
sle.project = parent_doc.project
where
sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is null
and parent_doc.project is not null and parent_doc.project != ''""".format(doctype), doctype)
| 34.9375 | 96 | 0.726297 |
deb1abcba5f4582ee96b47f47260cbe71cb776c6
| 612 |
py
|
Python
|
exercises/pt/exc_01_11.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_01_11.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_01_11.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
# NOTE(review): fill-in-the-blank course exercise — the `____` placeholders
# are intentional and are meant to be completed by the student.
import spacy
# Import the Matcher
from spacy.____ import ____
nlp = spacy.load("pt_core_news_sm")
doc = nlp("Vazou a data de lançamento do novo iPhone X após a Apple revelar a existência de compras antecipadas.")
# Initialize the matcher with the shared vocabulary
matcher = ____(____.____)
# Create a pattern that matches the tokens: "iPhone" and "X"
pattern = [____]
# Add the pattern to the matcher
____.____("IPHONE_X_PATTERN", ____)
# Use the matcher on the doc
matches = ____
print("Correspondências:", [doc[start:end].text for match_id, start, end in matches])
| 29.142857 | 114 | 0.761438 |
dfec20ca9c917a1ee5872d3c585895d029e2542b
| 5,674 |
py
|
Python
|
src/onegov/wtfs/layouts/municipality.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/layouts/municipality.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/layouts/municipality.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from cached_property import cached_property
from onegov.core.elements import Confirm
from onegov.core.elements import Intercooler
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.layouts.default import DefaultLayout
from onegov.wtfs.security import AddModel
from onegov.wtfs.security import DeleteModel
from onegov.wtfs.security import EditModel
class MunicipalitiesLayout(DefaultLayout):
    """Layout for the municipalities overview."""

    @cached_property
    def title(self):
        return _("Municipalities")

    @cached_property
    def editbar_links(self):
        # Importing data requires edit rights; creating requires add rights.
        links = []
        if self.request.has_permission(self.model, EditModel):
            links.append(
                Link(
                    text=_("Import data"),
                    url=self.request.link(self.model, 'import-data'),
                    attrs={'class': 'upload-icon'}
                )
            )
        if self.request.has_permission(self.model, AddModel):
            links.append(
                Link(
                    text=_("Add"),
                    url=self.request.link(self.model, name='add'),
                    attrs={'class': 'add-icon'}
                )
            )
        return links

    @cached_property
    def breadcrumbs(self):
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(self.title, self.municipalities_url),
        ]
class ImportMunicipalityDataLayout(DefaultLayout):
    """Layout for the municipality data import form."""

    @cached_property
    def title(self):
        """Page title of the import form."""
        return _("Import data")

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail: homepage > municipalities > import form."""
        crumbs = [Link(_("Homepage"), self.homepage_url)]
        crumbs.append(Link(_("Municipalities"), self.municipalities_url))
        crumbs.append(Link(self.title, '#'))
        return crumbs

    @cached_property
    def cancel_url(self):
        """Cancelling returns to the municipalities collection."""
        return self.municipalities_url

    @cached_property
    def success_url(self):
        """A successful import returns to the municipalities collection."""
        return self.municipalities_url
class MunicipalityLayout(DefaultLayout):
    """Layout for the detail view of a single municipality."""

    @cached_property
    def title(self):
        """The municipality's name serves as the page title."""
        return self.model.name

    @cached_property
    def editbar_links(self):
        """Edit-bar actions, gated by the user's permissions on the model."""
        result = []
        if self.request.has_permission(self.model, EditModel):
            result.append(
                Link(
                    text=_("Edit"),
                    url=self.request.link(self.model, 'edit'),
                    attrs={'class': 'edit-icon'}
                )
            )
            # Deleting pick-up dates only requires edit rights (it modifies
            # the municipality rather than removing it).
            result.append(
                Link(
                    text=_("Delete pick-up dates"),
                    url=self.request.link(self.model, 'delete-dates'),
                    attrs={'class': 'delete-icon'}
                )
            )
        if self.request.has_permission(self.model, DeleteModel):
            result.append(
                Link(
                    text=_("Delete"),
                    # Destructive URL is CSRF-protected.
                    url=self.csrf_protected_url(
                        self.request.link(self.model)
                    ),
                    attrs={'class': 'delete-icon'},
                    traits=(
                        # Ask for confirmation before deleting.
                        Confirm(
                            _(
                                "Do you really want to delete this "
                                "municipality?"
                            ),
                            _("This cannot be undone."),
                            _("Delete"),
                            _("Cancel")
                        ),
                        # Issue the DELETE request via AJAX, then redirect
                        # back to the collection view.
                        Intercooler(
                            request_method='DELETE',
                            redirect_after=self.municipalities_url
                        )
                    )
                )
            )
        return result

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail: homepage > municipalities > this municipality."""
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
            Link(self.title, '#')
        ]
class AddMunicipalityLayout(DefaultLayout):
    """Layout for the form that creates a new municipality."""

    @cached_property
    def title(self):
        """Page title of the add form."""
        return _("Add municipality")

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail: homepage > municipalities > add form."""
        crumbs = [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
        ]
        crumbs.append(Link(_("Add"), '#'))
        return crumbs

    @cached_property
    def cancel_url(self):
        """Cancelling returns to the municipalities collection."""
        return self.municipalities_url

    @cached_property
    def success_url(self):
        """A successful add returns to the municipalities collection."""
        return self.municipalities_url
class EditMunicipalityLayout(DefaultLayout):
    """Layout for the form that edits an existing municipality."""

    @cached_property
    def title(self):
        """Page title of the edit form."""
        return _("Edit municipality")

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail down to the edit form of this municipality."""
        detail_url = self.request.link(self.model)
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
            Link(self.model.name, detail_url),
            Link(_("Edit"), '#'),
        ]

    @cached_property
    def cancel_url(self):
        """Cancelling returns to the municipality's detail view."""
        return self.request.link(self.model)

    @cached_property
    def success_url(self):
        """A successful edit returns to the municipalities collection."""
        return self.municipalities_url
class DeleteMunicipalityDatesLayout(DefaultLayout):
    """Layout for the form that clears a municipality's pick-up dates."""

    @cached_property
    def title(self):
        """Page title of the delete-dates form."""
        return _("Delete pick-up dates")

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail down to this form."""
        crumbs = [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
        ]
        crumbs.append(Link(self.model.name, self.request.link(self.model)))
        crumbs.append(Link(self.title, '#'))
        return crumbs

    @cached_property
    def cancel_url(self):
        """Cancelling returns to the municipality's detail view."""
        return self.request.link(self.model)

    @cached_property
    def success_url(self):
        """On success, return to the municipality's detail view."""
        return self.request.link(self.model)
| 28.089109 | 70 | 0.529609 |
13fcd9b4a315b5625ee69751466c191671fb353d
| 605 |
py
|
Python
|
rev/VerytriVialreVersing/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
rev/VerytriVialreVersing/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
rev/VerytriVialreVersing/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
from z3 import *
s = Solver()
# Ciphertext bytes of the flag.
target = [0x98, 0x69, 0x98, 0x67, 0x9e, 0x64, 0x9f, 0x77, 0xad, 0x65, 0x76, 0x76, 0xb2, 0x69, 0x9e, 0x73, 0xa9, 0x57, 0xb4, 0x23, 0x9e, 0x77, 0xb3, 0x92, 0xa9, 0x58, 0xae, 0x2d, 0x59, 0x65, 0xa8, 0x15, 0x59, 0x21, 0xad, 0x66, 0xa5]
# One 8-bit symbolic variable per flag character.
flag = [BitVec("flag_%s" % i, 8) for i in range(0, len(target))]
# Constrain every character to printable ASCII.
for c in flag:
    s.add(c >= 0x20)
    s.add(c <= 0x7f)
key = [0x13, 0x37]
# Encryption model: XOR with key[0] then add key[1] (mod 256, via 8-bit
# bit-vector arithmetic); the two key bytes swap after every character.
for i in range(len(target)):
    s.add((flag[i] ^ key[0]) + key[1] == target[i])
    key = key[::-1]
print(s.check())
model = s.model()
# Concretize each symbolic byte and print the recovered flag.
print(''.join([chr(model.eval(i).as_long()) for i in flag]))
| 30.25 | 231 | 0.6 |
26b910af26c7162d598f18a9ec4af981ac091dd6
| 658 |
py
|
Python
|
setup.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | 5 |
2019-03-19T22:21:28.000Z
|
2020-09-16T03:08:56.000Z
|
setup.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | null | null | null |
import setuptools
# Reuse the README as the PyPI long description.
with open('README.md', 'r') as fh:
  long_description = fh.read()
# Package metadata for the adzuki ORM (Google Cloud Datastore).
setuptools.setup(
  name='adzuki',
  version='0.2.2',
  author='SHOWHUE, Ire Sun, Kunda Lee',
  author_email='[email protected]',
  description='ORM for GCP datastore',
  long_description=long_description,
  long_description_content_type='text/markdown',
  url='https://github.com/showhue/adzuki/',
  packages=['adzuki'],
  classifiers=[
    'Programming Language :: Python :: 3',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
  ],
  install_requires=[
    'jsonschema>=3.0.1',
    'google-cloud-datastore>=1.7.3'
  ],
)
| 25.307692 | 48 | 0.679331 |
f8488f2e49fd716621094ed670ee0db63e0a997b
| 685 |
py
|
Python
|
source/pkgsrc/devel/py-typed-ast/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/py-typed-ast/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/py-typed-ast/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-setup.py,v 1.1 2018/05/22 06:41:30 adam Exp $
Point to a parent directory, to distinguish includes from
the ones currenly installed with Python.
--- setup.py.orig 2017-07-18 21:23:52.000000000 +0000
+++ setup.py
@@ -9,7 +9,7 @@ except ImportError:
_ast27 = Extension(
'_ast27',
- include_dirs = ['ast27/Include'],
+ include_dirs = ['ast27'],
sources = [
'ast27/Parser/acceler.c',
'ast27/Parser/bitset.c',
@@ -45,7 +45,7 @@ _ast27 = Extension(
_ast3 = Extension(
'_ast3',
- include_dirs = ['ast3/Include'],
+ include_dirs = ['ast3'],
sources = [
'ast3/Parser/acceler.c',
'ast3/Parser/bitset.c',
| 26.346154 | 60 | 0.608759 |
6efd5a880b39ccb9718f04c53428f9814c686c35
| 6,643 |
py
|
Python
|
2018/quals/crypto-better-zip/src/solver.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 2,757 |
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
2018/quals/crypto-better-zip/src/solver.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 20 |
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
2018/quals/crypto-better-zip/src/solver.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 449 |
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: You need to comment out crc checking & encryption bit checking in the
# zipfile module (just copy it to te directory with solver.py and patch it
# locally).
import zipfile
import hashlib
from z3 import *
import sys
POLY_SZ = 20
class BitStream:
  """Reads individual bits (LSB-first within each byte) from a byte string.

  NOTE: this file is Python 2 code (print statements, xrange).
  """
  def __init__(self, data, sz=None):
    # sz caps the total number of readable bits; defaults to all of them.
    if sz is None:
      sz = len(data) * 8
    self.sz = sz
    self.data = bytearray(data)
    self.idx = 0
  def get_bit(self):
    """Return the next bit; raises once the stream is exhausted."""
    if self.idx >= self.sz:
      raise Exception('All bits used. Go away.')
    i_byte = self.idx / 8  # Python 2 integer division
    i_bit = self.idx % 8
    bit = (self.data[i_byte] >> i_bit) & 1
    self.idx += 1
    return bit
  def get_bits(self, sz):
    """Return the next `sz` bits packed into an int, LSB first."""
    v = 0
    for i in xrange(sz):
      v |= self.get_bit() << i
    return v
class LFSR:
  """Linear feedback shift register over `sz` bits.

  `poly` selects which state bits feed back; the feedback bit is
  1 XOR the parity of the masked state.
  """
  def __init__(self, poly, iv, sz):
    self.sz = sz
    self.poly = poly  # feedback tap mask
    self.r = iv       # current register state
    self.mask = (1 << sz) - 1
  def get_bit(self):
    """Output the register's top bit and advance the state by one step."""
    bit = (self.r >> (self.sz - 1)) & 1
    new_bit = 1
    masked = self.r & self.poly
    # Fold the masked state into a single parity bit.
    for i in xrange(self.sz):
      new_bit ^= (masked >> i) & 1
    self.r = ((self.r << 1) | new_bit) & self.mask
    return bit
class LFSRCipher:
  """Stream cipher built from 8 LFSRs, one per bit of each keystream byte."""
  def __init__(self, key, poly_sz=8, key_iv=None, cipher_iv=None):
    if len(key) < poly_sz:
      raise Exception('LFSRCipher key length must be at least %i' % poly_sz)
    key = BitStream(key)
    # key_iv whitens the key-derived polynomials; cipher_iv seeds the
    # register states.
    # NOTE(review): `os` is not imported in this file, so the urandom
    # fallbacks below would raise NameError — callers always supply IVs.
    if key_iv is None:
      key_iv = os.urandom(poly_sz)
    self.key_iv = key_iv
    key_iv_stream = BitStream(key_iv)
    if cipher_iv is None:
      cipher_iv = os.urandom(poly_sz)
    self.cipher_iv = cipher_iv
    cipher_iv_stream = BitStream(cipher_iv)
    self.lfsr = []
    # LFSR i: polynomial = key bits XOR key_iv bits, state = cipher_iv bits.
    for i in xrange(8):
      l = LFSR(key.get_bits(poly_sz) ^ key_iv_stream.get_bits(poly_sz),
               cipher_iv_stream.get_bits(poly_sz), poly_sz)
      self.lfsr.append(l)
  def get_keystream_byte(self):
    """Collect one bit from each LFSR into a single keystream byte."""
    b = 0
    for i, l in enumerate(self.lfsr):
      b |= l.get_bit() << i
    return b
  def get_headers(self):
    """IVs that must be prepended to the ciphertext for decryption."""
    return self.key_iv + self.cipher_iv
  def crypt(self, s):
    """XOR `s` with the keystream (symmetric encrypt/decrypt)."""
    s = bytearray(s)
    for i in xrange(len(s)):
      s[i] ^= self.get_keystream_byte()
    return str(s)
def split_bits(byte, sz=8):
  """Return the `sz` least-significant bits of `byte`, LSB first.

  Uses range() instead of the Python-2-only xrange(); behaviour is
  identical under Python 2 since the sequence is only iterated once.
  """
  return [(byte >> i) & 1 for i in range(sz)]
def rsplit_bits(byte, sz=8):
  """Return the `sz` least-significant bits of `byte`, MSB first.

  Uses range() instead of the Python-2-only xrange(); behaviour is
  identical under Python 2 since the sequence is only iterated once.
  """
  return [(byte >> i) & 1 for i in range(sz - 1, -1, -1)]
def known_keystream(data, known):
  """Recover keystream bits at known-plaintext positions.

  `known` is a list of (offset, plaintext) pairs; negative offsets are
  taken relative to the end of `data`. Returns (offset, bits) pairs where
  bits is the LSB-first bit list of ciphertext XOR plaintext.
  """
  ks = []
  for offset, known_data in known:
    known_data = bytearray(known_data)
    if offset is None:
      continue  # TODO(gynvael): Add support
    if offset < 0:
      offset += len(data)
    # XOR each ciphertext byte with its known plaintext byte to expose
    # the keystream byte at that offset.
    for i, (act_byte, known_byte) in enumerate(zip(
        data[offset:offset + len(known_data)], known_data)):
      ks.append((offset + i, split_bits(act_byte ^ known_byte)))
  return ks
def general_lsfr_iterate(poly, r):
  """Symbolic (z3) single step of the LFSR with unknown polynomial `poly`.

  Mirrors LFSR.get_bit: returns (next_state, output_bit) as bit-vector
  expressions.
  """
  new_bit = BitVecVal(1, 1)
  # Parity of the masked state, as in the concrete LFSR.
  for i in xrange(POLY_SZ):
    new_bit = new_bit ^ (Extract(i, i, poly) & Extract(i, i, r))
  new_r = Concat(Extract(POLY_SZ - 2, 0, r), new_bit)
  gen_bit = Extract(POLY_SZ - 1, POLY_SZ - 1, r)
  return (new_r, gen_bit)
def solve(key_iv, cipher_iv, data, enc_hash, known):
  """Recover the LFSRCipher key from known plaintext using z3.

  key_iv/cipher_iv: IVs read from the file header.
  data: ciphertext bytes; enc_hash: encrypted sha256 of the plaintext.
  known: (offset, plaintext) pairs consumed by known_keystream().
  """
  org_key_iv = key_iv
  org_cipher_iv = cipher_iv
  ks = known_keystream(data, known)
  # Only LFSR states up to the last known offset need to be modelled.
  end_offset = max(k[0] for k in ks) + 1
  print "Number of known bytes:", len(ks)
  print "Number of usable bytes:", (len(ks) - POLY_SZ)
  print "End state offset:", end_offset
  # One unknown polynomial per keystream bit position.
  poly = []
  for i in xrange(8):
    poly.append(BitVec("poly_%i" % i, POLY_SZ))
  # The register start states are concrete: they come from cipher_iv.
  r = []
  cipher_iv = BitStream(org_cipher_iv)
  for i in xrange(8):
    r.append(BitVecVal(cipher_iv.get_bits(POLY_SZ), POLY_SZ))
  print "Adding states..."
  sys.stdout.flush()
  # Run the 8 LFSRs forward symbolically; state[j][i] is bit i of
  # keystream byte j as an expression in poly[i].
  state = []
  for j in xrange(end_offset):
    if j % 65 == 0:
      sys.stdout.write("--> %i / %i\r" % (j, end_offset))
      sys.stdout.flush()
    s = []
    for i in xrange(8):
      new_r, gen_bit = general_lsfr_iterate(poly[i], r[i])
      r[i] = new_r
      s.append(gen_bit)
    state.append(s)
  print "Done!            "
  print "Solving..."
  sys.stdout.flush()
  s = Solver()
  # Pin the symbolic keystream to the bits recovered from known plaintext.
  for offset, bits in ks:
    for i in xrange(8):
      s.add(state[offset][i] == bits[i])
  res = s.check()
  # The polynomials equal key XOR key_iv (see LFSRCipher.__init__), so
  # XOR-ing a solution with key_iv yields the key.
  key_iv = BitStream(org_key_iv)
  key_xor = BitVecVal(key_iv.get_bits(POLY_SZ * 8), POLY_SZ * 8)
  while res.r == 1:
    m = s.model()
    key_bv = poly[0]
    for i in xrange(1, 8):
      key_bv = Concat(poly[i], key_bv)
    key = m.eval(key_bv ^ key_xor).as_long()
    # Strip "0x"/"L" from the Python 2 long repr, decode hex and reverse
    # the byte order.
    key = hex(key)[2:-1].decode('hex')[::-1]
    print "Key:", `key`, len(key)
    if len(key) == POLY_SZ:
      # Try the candidate: decrypt and compare against the embedded sha256.
      d = LFSRCipher(key, POLY_SZ, org_key_iv, org_cipher_iv)
      dec = d.crypt(data)
      dec_hash = d.crypt(enc_hash)
      act_hash = hashlib.sha256(dec).digest()
      print "hash match:", act_hash == dec_hash
      if act_hash == dec_hash:
        with open("dump_%s.png" % key.encode("hex"), "wb") as f:
          f.write(dec)
      # Exclude this candidate and search for further solutions.
      s.add(key_bv != m.eval(key_bv).as_long())
    res = s.check()
    print res
  return key
# Fun fact: this requires a hacked zipfile module which ignores the
# 'encrypted' flag and crc32 errors.
z = zipfile.ZipFile("../attachments/flag.zip")
data = z.read("flag.png")
# Extracted layout: key IV || cipher IV || ciphertext || encrypted sha256.
key_iv = data[:POLY_SZ]
cipher_iv = data[POLY_SZ:POLY_SZ*2]
enc = bytearray(data[POLY_SZ*2:-32])
enc_hash = data[-32:]
# Known plaintext: bytes every PNG file (almost certainly) starts with.
known = [
    (0, "89504E470D0A1A0A".decode('hex')),  # PNG magic
    (8, "0000000D".decode('hex')),  # IHDR length
    (0x0c, "IHDR"),  # IHDR
    (0x10, "\0\0"),  # Width, last two bytes 0 (BE)
    (0x10 + 4, "\0\0"),  # Height, last two bytes 0 (BE)
    (0x18, "\x08"),  # Bit depth (1, 2, 4, 8 or 16)
    (0x19, "\2"),  # Color type, probably 2 (possible values: 0, 2, 3, 4, 6)
    (0x1A, "\0"),  # Compression method.
    (0x1B, "\0"),  # Filter method.
    (0x1C, "\0"),  # Interlace method, must be 0 or 1, assuming 0
    (0x1D + 4, "\0\0\0\1"),  # Length of IDAT will be \0\0 or \0\1
    (0x1D + 8, "sRGB\0\xae\xce\x1c\xe9\0\0"),  # IDAT will have to be there. Probably.
    #(-12, "0000000049454E44AE426082".decode('hex')),  # full IEND header
]
key = solve(key_iv, cipher_iv, enc, enc_hash, known)
#print "Key:", `key`
| 25.648649 | 84 | 0.618245 |
9041503c6f60fe4bd6e64ead24995d93616168aa
| 8,556 |
py
|
Python
|
apps/users/models.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/users/models.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/users/models.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
import base64
from datetime import datetime, timedelta
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
import markdown
from apps.utils.constants import METHOD_CHOICES, PROTOCOL_CHOICES, OBFS_CHOICES
from node.models import Node
class UserProfile(AbstractUser):
    """Custom user model: profile fields plus coin/experience gamification."""
    gender_choices = (
        ('male', '男'),
        ('female', '女'),
    )
    # Override the email field so addresses are unique, enabling email login.
    email = models.EmailField(_('email address'), unique=True)
    nick_name = models.CharField(max_length=20, default="", verbose_name="昵称")
    birthday = models.DateField(blank=True, null=True, verbose_name="生日")
    gender = models.CharField(max_length=6, choices=gender_choices, default='male', verbose_name="性别")
    address = models.CharField(max_length=100, default="", verbose_name="地址")
    mobile = models.CharField(max_length=11, null=True, blank=True, verbose_name="手机号")
    profile_photo = models.ImageField(
        max_length=100, upload_to='image/%Y/%m', default='image/default.png', verbose_name="用户头像")
    # Business-related attributes.
    coin_nums = models.DecimalField(verbose_name="硬币数", decimal_places=2, max_digits=10,
                                    default=10, editable=True, null=True, blank=True)
    # NOTE(review): the validator caps experience at 100, yet user_level()
    # defines levels up to 1000+ — confirm which limit is intended.
    experience = models.PositiveIntegerField(verbose_name="经验", default=0, help_text="用于计算用户等级",
                                             validators=[MaxValueValidator(100), MinValueValidator(0)])
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.username
    @classmethod
    def user_count(cls):
        """Total number of registered users."""
        return len(cls.objects.all())
    def user_level(self):
        """Return the user's level (0-5) derived from experience points."""
        # Experience intervals [lower, upper) per level.
        levels = {
            0: [0, 10],
            1: [10, 50],
            2: [50, 200],
            3: [200, 500],
            4: [500, 1000],
            5: [1000, float('inf')]
        }
        for k, v in levels.items():
            if v[0] <= self.experience < v[1]:
                return k
        return 0
class SSRAccount(models.Model):
    """ShadowsocksR account: one per user, bound to a node and a unique port."""
    expiration_time = models.DateTimeField(verbose_name='SSR有效期', default=timezone.now)
    port = models.PositiveIntegerField(verbose_name="用户端口", unique=True)
    passwd = models.CharField(max_length=30, null=True, blank=True)
    method = models.CharField(verbose_name="加密方法", max_length=30, choices=METHOD_CHOICES,
                              default='none')
    protocol = models.CharField(verbose_name="协议", max_length=30, choices=PROTOCOL_CHOICES,
                                default='origin')
    obfs = models.CharField(verbose_name="混淆方法", max_length=30, choices=OBFS_CHOICES,
                            default='plain')
    compatible = models.BooleanField(verbose_name="是否启用混淆", default=False)
    node = models.ForeignKey(Node, on_delete=models.CASCADE, verbose_name="关联的节点")
    user = models.OneToOneField(UserProfile, on_delete=models.CASCADE, verbose_name="所有用户")
    @classmethod
    def available_port(cls):
        """Return an unused port in the 7000-7999 range.

        NOTE(review): picks an arbitrary element of an unordered set (not
        the lowest free port) and raises IndexError when the range is full.
        """
        exist_port = cls.objects.values_list('port', flat=True)
        ports = list(range(7000, 8000))
        return list(set(ports).difference(set(exist_port)))[0]
    def __str__(self):
        return str(self.port)
    @staticmethod
    def url_safe64(text):
        """Decode base64 bytes to str and make it URL-safe:
        drop spaces and '=' padding, map '+' -> '-', '/' -> '_'."""
        text = str(text, encoding='utf-8')
        text = text.replace(" ", "")
        text = text.replace("=", "")
        text = text.replace("+", "-")
        text = text.replace(r"/", "_")
        return text
    def ss_base64(self):
        """URL-safe base64 of the SS connection string method:passwd@ip:port."""
        txt = "{}:{}@{}:{}".format(self.method, self.passwd, self.node.ip, self.port)
        btxt = bytes(txt, encoding='utf-8')
        return SSRAccount.url_safe64(base64.b64encode(btxt))
    @property
    def ss_qrcode(self):
        """QR-code image URL for the SS share link."""
        return "https://makeai.cn/qr/?m=2&e=H&p=3&url={}".format(self.ss_base64())
    @property
    def ss_url(self):
        """ss:// share link."""
        return "ss://{}".format(self.ss_base64())
    def ssr_base64(self):
        """URL-safe base64 of the SSR connection string; any '_compatible'
        suffix is stripped from protocol/obfs before encoding."""
        protocol = self.protocol.replace("_compatible", "")
        obfs = self.obfs.replace("_compatible", "")
        pwdbase64 = SSRAccount.url_safe64(base64.b64encode(bytes(self.passwd, encoding='utf8')))
        txt = "{}:{}:{}:{}:{}:{}".format(self.node.ip, self.port, protocol, self.method, obfs, pwdbase64)
        btxt = bytes(txt, encoding='utf8')
        return SSRAccount.url_safe64(base64.b64encode(btxt))
    @property
    def ssr_qrcode(self):
        """QR-code image URL for the SSR share link."""
        return "https://makeai.cn/qr/?m=2&e=H&p=3&url={}".format(self.ssr_base64())
    @property
    def ssr_url(self):
        """ssr:// share link."""
        return "ssr://{}".format(self.ssr_base64())
class UserModifyRecord(models.Model):
    """Audit log of account modifications (email, password, avatar).
    """
    chics = (
        ("modify_email", "修改邮箱"),
        ("modify_password", "修改密码"),
        ("modify_profile_photo", "修改头像"),
    )
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, verbose_name="账号")
    modify_type = models.CharField(choices=chics, max_length=25, verbose_name="修改类型")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "账号修改记录"
        verbose_name_plural = verbose_name
class Announcement(models.Model):
    """System announcement; the body is stored as rendered markdown HTML."""
    time = models.DateTimeField('时间', auto_now_add=True)
    body = models.TextField('主体')
    def __str__(self):
        # str(datetime) begins with "YYYY-MM-DD" — 10 characters; the
        # previous [:9] slice cut off the last digit of the day.
        return '日期:{}'.format(str(self.time)[:10])
    # Override save so the body text is rendered from markdown to HTML
    # before being written to the database.
    def save(self, *args, **kwargs):
        # NOTE(review): the body is re-rendered on *every* save, so saving
        # an already-rendered announcement runs it through markdown again —
        # confirm this is intended.
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
        ])
        self.body = md.convert(self.body)
        # Let the parent class persist the rendered content.
        super(Announcement, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = '系统公告'
        ordering = ('-time', )
class WorkOrder(models.Model):
    """Support ticket opened by a user."""
    # NOTE(review): `stats` is declared with {} (a set literal), so choice
    # ordering is unspecified — a tuple would be deterministic.
    stats = {
        ('open', 'open'),
        ('closed', 'closed')
    }
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, verbose_name="用户")
    title = models.CharField(max_length=50, verbose_name="标题")
    body = models.TextField(verbose_name="工单内容")
    status = models.CharField(max_length=20, default="open", choices=stats, verbose_name="工单状态")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = "工单"
        verbose_name_plural = verbose_name
class DataUsageRecord(models.Model):
    """Point-in-time traffic usage snapshots for an SSR account."""
    # Kept commented out because of a circular model import:
    # user = models.ForeignKey("goods.SsrAccount", on_delete=models.CASCADE, verbose_name="用户")
    ssr = models.ForeignKey(SSRAccount, on_delete=models.CASCADE, verbose_name="SSR账户")
    bytes_received = models.CharField(max_length=20, null=False, blank=False, verbose_name="收到的数据")
    bytes_sent = models.CharField(max_length=20, null=False, blank=False, verbose_name="发送的数据")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "用户流量使用情况"
        verbose_name_plural = verbose_name
    def last_week(self):
        # Not implemented yet.
        pass
    @classmethod
    def last_30_days(cls, ssr):
        """Daily upload/download for the last 30 days, one point per day.

        Values are divided by 1024 (bytes -> KiB).
        """
        now = datetime.now()
        step = timedelta(days=1)
        start = now - timedelta(days=30)
        data_x = cls.date_range(start, now, step)
        usages = []
        for d in data_x:
            data_usage = cls.objects.filter(ssr=ssr, add_time__year=d.year, add_time__month=d.month,
                                            add_time__day=d.day)
            # Use the first record of the day, or zero when none exists.
            yu = data_usage[0].bytes_sent if data_usage else 0
            yd = data_usage[0].bytes_received if data_usage else 0
            usage = {
                "x": d,
                "yu": int(yu)//1024,
                "yd": int(yd)//1024,
            }
            usages.append(usage)
        return usages
    @staticmethod
    def date_range(start, stop, step):
        """Yield datetimes from start (inclusive) to stop (exclusive) by step."""
        while start < stop:
            yield start
            start += step
class TradeRecord(models.Model):
    """Transaction record: amount paid and number of days purchased."""
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, verbose_name="用户")
    amount = models.DecimalField('交易金额', max_digits=19, decimal_places=4)
    time = models.PositiveIntegerField(verbose_name="购买天数")
    add_time = models.DateTimeField(verbose_name='交易时间', default=datetime.now)
| 35.65 | 105 | 0.629383 |
5fa2be19b01e97f3d2449e4fb2a57f9cc732b0fe
| 2,230 |
py
|
Python
|
pupil_invisible_monitor/src/pupil_invisible_monitor/ui.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
pupil_invisible_monitor/src/pupil_invisible_monitor/ui.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
pupil_invisible_monitor/src/pupil_invisible_monitor/ui.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
import logging
from pyglui import ui
import glfw.GLFW as glfw
from .models import Host_Controller
logger = logging.getLogger(__name__)
THUMB_SETTINGS = dict(label_font="opensans", label_offset_size=0)
class HostViewController:
    """Keeps the GUI thumb list in sync with a Host_Controller's hosts."""
    def __init__(self, gui_parent, controller: Host_Controller):
        self.gui_parent = gui_parent
        # Mirror the controller's host list into the GUI via observers.
        controller.add_observer("on_host_added", self.on_host_added)
        controller.add_observer("on_host_removed", self.on_host_removed)
        controller.add_observer("on_host_changed", self.on_host_changed)
        self.controller = controller
    def on_host_added(self, host_idx):
        """Insert a thumb for the newly discovered host at the same index."""
        logger.debug(f"on_host_added({host_idx})")
        host = self.controller[host_idx]
        host_thumb = self.thumb_for_host(host)
        self.gui_parent.insert(host_idx, host_thumb)
    def on_host_removed(self, host_idx):
        """Drop the thumb corresponding to the removed host."""
        logger.debug(f"on_host_removed({host_idx})")
        del self.gui_parent[host_idx]
    def on_host_changed(self, host_idx):
        """Recolor the host's thumb to reflect its link/availability state."""
        logger.debug(f"on_host_changed({host_idx})")
        host = self.controller[host_idx]
        thumb = self.gui_parent[host_idx]
        # RGBA colors: dark blue = linked but in a bad state, green =
        # linked and available, red = linked but unavailable.
        if host.is_linked and host.is_in_bad_state:
            iris_dark_blue = 0.157, 0.208, 0.576, 1.0
            thumb.on_color[:] = iris_dark_blue
        elif host.is_linked and host.is_available:
            iris_green = 0.024, 0.631, 0.145, 1.0
            thumb.on_color[:] = iris_green
        elif host.is_linked and not host.is_available:
            retina_red = 0.957, 0.263, 0.212, 1.0
            thumb.on_color[:] = retina_red
        # ensure ui update: toggling status_text forces a redraw
        if thumb.status_text == "":
            thumb.status_text = " "
        else:
            thumb.status_text = ""
    def cleanup(self):
        """Drop references so GUI and controller can be garbage collected."""
        self.gui_parent = None
        self.controller = None
    def thumb_for_host(self, host):
        """Build a toggle thumb whose setter links the given host."""
        def link_host(turn_on):
            # The toggle value is ignored; selecting always links this host.
            self.controller.link(host)
        host_thumb = ui.Thumb(
            "is_linked",
            host,
            setter=link_host,
            label=host.name[:2],
            hotkey=host.name[0].lower(),
            **THUMB_SETTINGS,
        )
        return host_thumb
| 32.794118 | 73 | 0.61435 |
398fb01aa80f3c157990a73fd51f91d9cea8a894
| 1,188 |
py
|
Python
|
bot/utils.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
bot/utils.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
bot/utils.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
from math import radians, cos, sin, asin, sqrt
from functools import wraps
import telegram
from jinja2 import Environment, FileSystemLoader
# load Jinja
file_loader = FileSystemLoader("bot/templates/")
jinja_env = Environment(loader=file_loader)
def send_action(action: telegram.ChatAction):
    """Decorator factory: send `action` as a chat action, then run the handler."""
    def decorator(func):
        @wraps(func)
        def wrapper(update, context, *args, **kwargs):
            chat_id = update.effective_message.chat_id
            context.bot.send_chat_action(chat_id=chat_id, action=action)
            return func(update, context, *args, **kwargs)
        return wrapper
    return decorator
def haversine(lat1, lon1, lat2, lon2):
    """
    Great-circle distance in metres between two points given in
    decimal degrees (haversine formula).
    """
    # Work in radians from here on.
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lon1, lat2, lon2))
    d_phi = phi2 - phi1
    d_lam = lam2 - lam1
    a = sin(d_phi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(d_lam / 2) ** 2
    central_angle = 2 * asin(sqrt(a))
    earth_radius_m = 6371000  # mean Earth radius in metres
    return central_angle * earth_radius_m
| 31.263158 | 97 | 0.675084 |
4b4a98b1da98c6e973fbf428b88c03223b8988a8
| 1,545 |
py
|
Python
|
site/public/courses/BEW-1.1/Lessons/03-Intro-to-Flask/demo/compliments.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | 1 |
2021-08-24T20:22:19.000Z
|
2021-08-24T20:22:19.000Z
|
site/public/courses/BEW-1.1/Lessons/03-Intro-to-Flask/demo/compliments.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
site/public/courses/BEW-1.1/Lessons/03-Intro-to-Flask/demo/compliments.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
from random import choice, sample
app = Flask(__name__)
compliments = [
'awesome', 'terrific', 'fantastic', 'neato', 'fantabulous', 'wowza',
'oh-so-not-meh', 'brilliant', 'ducky', 'coolio', 'incredible',
'wonderful', 'smashing', 'lovely', 'tenacious', 'Pythonic']
@app.route('/')
def index():
    """Show the homepage and ask the user's name."""
    # Static HTML form that submits (via GET) to the /compliment route.
    return """
    <form action='/compliment'>
        <p>
            What is your name?
            <input type="text" name="name"/>
        </p>
        <p>
            <input type="checkbox" name="show_compliments"/>
            Show Compliments
        </p>
        <p>
            How many compliments?
            <select name="num_compliments">
                <option value="1">One</option>
                <option value="2">Two</option>
                <option value="3">Three</option>
            </select>
        </p>
        <input type="submit">
    </form>
    """
@app.route('/compliment')
def get_compliment():
    """Greet the user, optionally with randomly sampled compliments."""
    name = request.args.get('name')
    how_many = int(request.args.get('num_compliments'))
    wants_compliments = request.args.get('show_compliments')
    # Compliments are sampled up front, regardless of the checkbox.
    nice_things = ', '.join(sample(compliments, how_many))
    if not wants_compliments:
        return f'Hello there, {name}! Have a nice day!'
    return f'Hello there, {name}! You are so {nice_things}!'
# Start the development server (auto-reload enabled) when run as a script.
if __name__ == '__main__':
    app.run(debug=True)
| 30.294118 | 73 | 0.553398 |
d9bc5d770e7449aeae18fbf04df34af1c0a26cbd
| 511 |
py
|
Python
|
SleekSecurity/layers/plugins/fingerprint/os/linux.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
SleekSecurity/layers/plugins/fingerprint/os/linux.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
SleekSecurity/layers/plugins/fingerprint/os/linux.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name: Wascan - Web Application Scanner
# @repo: https://github.com/m4ll0k/Wascan
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'LICENSE.txt
from re import search,I
def linux(headers):
    """Return the matched Linux distribution name (title-cased), or None.

    Scans every HTTP header value for distro keywords; earlier entries
    in the candidate list take priority.
    """
    distros = ("linux", "ubuntu", "gentoo", "debian", "dotdeb", "centos",
               "redhat", "sarge", "etch", "lenny", "squeeze", "wheezy",
               "jessie", "red hat", "scientific linux")
    for distro in distros:
        for value in headers.values():
            if search(distro, value, I):
                return distro.title()
| 30.058824 | 84 | 0.639922 |
d9e60018505377d00cb43890dbef1bbdbcdcfca9
| 4,348 |
py
|
Python
|
oneflow/python/test/ops/test_object_segm_poly_flip.py
|
caishenghang/oneflow
|
db239cc9f98e551823bf6ce2d4395bd5c339b1c5
|
[
"Apache-2.0"
] | 2 |
2021-09-10T00:19:49.000Z
|
2021-11-16T11:27:20.000Z
|
oneflow/python/test/ops/test_object_segm_poly_flip.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/ops/test_object_segm_poly_flip.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | 1 |
2021-11-10T07:57:01.000Z
|
2021-11-10T07:57:01.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import random
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _of_object_segm_poly_flip(poly_list, image_size, flip_code):
    """Run OneFlow's polygon-flip op on a batch of polygon arrays.

    Returns the flipped polygons as a list of numpy arrays (one per input).
    """
    # The placeholder needs a static shape large enough for every polygon.
    poly_shape = _get_segm_poly_static_shape(poly_list)
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())
    @flow.global_function(function_config=func_config)
    def object_segm_poly_flip_job(
        poly_def: oft.ListListNumpy.Placeholder(
            shape=tuple(poly_shape), dtype=flow.float
        ),
        image_size_def: oft.ListNumpy.Placeholder(
            shape=image_size.shape, dtype=flow.int32
        ),
    ):
        # Variable-length polygons travel through a tensor buffer.
        poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
        flip_poly = flow.object_segmentation_polygon_flip(
            poly_buffer, image_size_def, flip_code
        )
        return flow.tensor_buffer_to_tensor_list(
            flip_poly, shape=poly_shape[1:], dtype=flow.float
        )
    # Add the leading list dimension the ListListNumpy placeholder expects.
    input_poly_list = [np.expand_dims(bbox, axis=0) for bbox in poly_list]
    poly_tensor = object_segm_poly_flip_job([input_poly_list], [image_size]).get()
    return poly_tensor.numpy_lists()[0]
def _get_segm_poly_static_shape(poly_list):
    """Static (padded) shape for a batch of variable-sized polygon arrays.

    Returns ``[len(poly_list), *elementwise_max_of_shapes]`` as a plain list.
    """
    all_shapes = [p.shape for p in poly_list]
    max_dims = np.amax(all_shapes, axis=0)
    assert isinstance(
        max_dims, np.ndarray
    ), "poly_shapes: {}, poly_static_shape: {}".format(
        str(all_shapes), str(max_dims)
    )
    return [len(poly_list)] + max_dims.tolist()
def _compare_segm_poly_flip(
    test_case, anno_file, batch_size, flip_code, print_debug_info=False
):
    """Compare OneFlow's polygon flip against a numpy reference on COCO data."""
    from pycocotools.coco import COCO
    coco = COCO(anno_file)
    img_ids = coco.getImgIds()
    segm_poly_list = []
    image_size_list = []
    sample_cnt = 0
    # Randomly pick images until we have batch_size usable samples.
    while sample_cnt < batch_size:
        rand_img_id = random.choice(img_ids)
        anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
        if len(anno_ids) == 0:
            continue
        poly_pts = []
        for anno_id in anno_ids:
            anno = coco.anns[anno_id]
            # Crowd annotations use RLE, not polygon lists — skip them.
            if anno["iscrowd"] != 0:
                continue
            assert isinstance(anno["segmentation"], list)
            for poly in anno["segmentation"]:
                assert isinstance(poly, list)
                poly_pts.extend(poly)
        # Flatten all polygon points of the image into an (N, 2) array.
        poly_array = np.array(poly_pts, dtype=np.single).reshape(-1, 2)
        segm_poly_list.append(poly_array)
        image_size_list.append(
            [coco.imgs[rand_img_id]["width"], coco.imgs[rand_img_id]["height"]]
        )
        sample_cnt += 1
    image_size_array = np.array(image_size_list, dtype=np.int32)
    of_segm_poly_list = _of_object_segm_poly_flip(
        segm_poly_list, image_size_array, flip_code
    )
    for of_poly, poly, image_size in zip(
        of_segm_poly_list, segm_poly_list, image_size_list
    ):
        w, h = image_size
        # Reference implementation: horizontal flip mirrors x around width.
        if flip_code == 1:
            poly[:, 0] = w - poly[:, 0]
        else:
            raise NotImplementedError
        if print_debug_info:
            print("-" * 20)
            print("of_poly:", of_poly.squeeze().shape, "\n", of_poly.squeeze())
            print("poly:", poly.shape, "\n", poly)
        test_case.assertTrue(np.allclose(of_poly.squeeze(), poly))
@flow.unittest.skip_unless_1n1d()
class TestObjectSegmPolyFlip(flow.unittest.TestCase):
    """Single-node, single-device test for segmentation polygon flipping."""
    def test_object_segm_poly_flip(test_case):
        # Requires the locally mounted COCO 2017 validation annotations;
        # batch size 4, flip code 1 (horizontal).
        _compare_segm_poly_flip(
            test_case, "/dataset/mscoco_2017/annotations/instances_val2017.json", 4, 1
        )
if __name__ == "__main__":
unittest.main()
| 32.939394 | 86 | 0.680313 |
d9ffa4ed62396578882ab6ed8db60402863b18c3
| 2,311 |
py
|
Python
|
tests/rbac/common/role/propose_admin_helper_tests.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 75 |
2018-04-06T09:13:34.000Z
|
2020-05-18T18:59:47.000Z
|
tests/rbac/common/role/propose_admin_helper_tests.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 989 |
2018-04-18T21:01:56.000Z
|
2019-10-23T15:37:09.000Z
|
tests/rbac/common/role/propose_admin_helper_tests.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 72 |
2018-04-13T18:29:12.000Z
|
2020-05-29T06:00:33.000Z
|
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Propose Role Admin Helper Test"""
# pylint: disable=no-member
import pytest
from rbac.common.crypto.keys import Key
from rbac.common import protobuf
from rbac.common.logs import get_default_logger
from tests.rbac.common import helper
LOGGER = get_default_logger(__name__)
@pytest.mark.role
@pytest.mark.library
def test_id():
    """Test get a random proposal id"""
    first = helper.role.admin.propose.id()
    second = helper.role.admin.propose.id()
    # Both ids are 24-character strings and distinct from each other.
    for value in (first, second):
        assert isinstance(value, str)
        assert len(value) == 24
    assert first != second
@pytest.mark.role
@pytest.mark.library
def test_reason():
    """Test get a random reason"""
    first = helper.role.admin.propose.reason()
    second = helper.role.admin.propose.reason()
    # Both reasons are non-trivial strings and differ from each other.
    for value in (first, second):
        assert isinstance(value, str)
        assert len(value) > 4
    assert first != second
@pytest.mark.role
def test_create():
    """A user creates an add role admin proposal
    to add themselves as an admin to a role"""
    proposal, role, role_owner, role_owner_key, user, user_key = (
        helper.role.admin.propose.create()
    )
    # Type checks on every object returned by the helper.
    assert isinstance(proposal, protobuf.proposal_state_pb2.Proposal)
    assert isinstance(role, protobuf.role_state_pb2.RoleAttributes)
    assert isinstance(user, protobuf.user_state_pb2.User)
    assert isinstance(role_owner, protobuf.user_state_pb2.User)
    assert isinstance(user_key, Key)
    assert isinstance(role_owner_key, Key)
    # The proposal links the role (object) to the proposing user (related).
    assert proposal.object_id == role.role_id
    assert proposal.related_id == user.next_id
| 32.549296 | 79 | 0.710082 |
6ab8886e1732e2c8335d09626a3d391ae699e716
| 200 |
py
|
Python
|
exercises/de/solution_01_02_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/de/solution_01_02_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/de/solution_01_02_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
# Import spaCy
import spacy

# Create a blank English nlp object
nlp = spacy.blank("en")

# Process a text
doc = nlp("This is a sentence.")

# Print the document's text
print(doc.text)
| 18.181818 | 36 | 0.74 |
be20b268542bd9fb5b925f915610a8d808658171
| 529 |
py
|
Python
|
actions.py
|
matthewmercuri/mdbacktestv2
|
2525113458cdf71e7d99ac208ad35aea92169b99
|
[
"MIT"
] | null | null | null |
actions.py
|
matthewmercuri/mdbacktestv2
|
2525113458cdf71e7d99ac208ad35aea92169b99
|
[
"MIT"
] | null | null | null |
actions.py
|
matthewmercuri/mdbacktestv2
|
2525113458cdf71e7d99ac208ad35aea92169b99
|
[
"MIT"
] | null | null | null |
class Actions:
    '''Buy/sell actions for the backtester.

    TODO: figure out a way so that every time a method in this class is
    called, it goes out and gets the current df and portfolio.
    '''

    def _test(self):
        # Smoke-test helper; relies on self.todays_df() being supplied by
        # a subclass/mixin -- it is not defined in this class.
        print(self.todays_df())

    def _check_valid_buy(self, symbol, quantity):
        # Placeholder: validate that a buy order is possible/affordable.
        pass

    def _check_valid_sell(self, symbol, quantity):
        # Placeholder: validate that enough shares are held to sell.
        pass

    def buy(self, symbol, quantity, todays_df, portfolio):
        """Execute a buy order (currently only prints its inputs)."""
        print(todays_df)
        print(portfolio)

    def sell(self, symbol, quantity, todays_df, portfolio):
        """Execute a sell order (not implemented yet)."""
        pass
| 24.045455 | 59 | 0.642722 |
ed633771a0dcf96096590223c8408989d8dc4e56
| 174 |
py
|
Python
|
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5 |
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Run every unit-test script with the current Python interpreter."""

import os
import sys

for script in ("widgets_test.py", "api.py", "examples.py"):
    os.system(sys.executable + " " + script)
| 19.333333 | 46 | 0.718391 |
71fb05950ffb76adfddb70032efa0658312ff59b
| 259 |
py
|
Python
|
foundation/www/index.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 59 |
2017-03-15T08:14:52.000Z
|
2021-11-17T14:21:58.000Z
|
foundation/www/index.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 147 |
2017-01-25T10:44:47.000Z
|
2020-11-05T04:24:22.000Z
|
foundation/www/index.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 134 |
2017-03-14T14:04:21.000Z
|
2022-03-18T08:19:47.000Z
|
# See license.txt
from __future__ import unicode_literals
import frappe
def get_context(context):
    """Build the template context holding all published portal events."""
    events = frappe.get_all(
        "Portal Event",
        fields=["event_title", "event_date", "details", "route"],
        filters=[["published", "=", 1]],
    )
    return {"events": events}
| 23.545455 | 133 | 0.694981 |
9c1dc541272c6bfb6340f18018f434bdd18e18d3
| 2,744 |
py
|
Python
|
src/server/app/endpoints/chatting/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
src/server/app/endpoints/chatting/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
src/server/app/endpoints/chatting/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
from re import U
from flask import Flask, Blueprint, render_template, abort, g, request
from flask_socketio import emit, join_room, leave_room, send
from ...db.settings import db, oidc, socketio, mongoclient
from ...repository.PresentationRepositoryNew import broadCastMessage, broadCastMessageExceptOwn
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt, set_access_cookies, get_jti,
set_refresh_cookies, unset_jwt_cookies, decode_token)
from ...repository.ChatRepository import ChatRepository
from ...repository.AuthenticationRepository import AuthenticationRepository
import json
from bson import json_util
import threading
# Repositories backed by the live (non-test) database.
cRepo = ChatRepository(testing=False)
aRepo = AuthenticationRepository(testing=False)

# Blueprint grouping all chat-related HTTP routes and templates.
chatting_endpoint = Blueprint("chatting", __name__,
                              static_folder="static", template_folder="templates")
@chatting_endpoint.route('/')
def index():
    """Render the chat/editor landing page."""
    return render_template('/editor-redesign/index.html')
@chatting_endpoint.route('/getChatting', methods=["GET"])
@jwt_required
def getChannels():
    """Return the authenticated user's chat channels as JSON."""
    print("Der Bua hat seine Channels angefordert!")
    return json.dumps({"res": cRepo.getChannels(u_id=get_jwt_identity())})
@chatting_endpoint.route('/getAllMessages', methods=["POST"])
@jwt_required
def getAllMessagesRoute():
    """Return all messages of the presentation given by form field 'p_id'."""
    p_id = request.form['p_id']
    return json.dumps({"res": cRepo.getAllMessages(p_id=p_id)})
@socketio.on('connectUserToChat')
def connectUserToChat(json):
    """Join the socket room named after the connecting user's id.

    NOTE(review): the ``json`` parameter shadows the imported ``json``
    module inside this handler.
    """
    print("User is joining the chat!")
    join_room(json["user_id"])
@socketio.on('messageSentToServer')
def userHasConnected(json):
    """Persist an incoming chat message and broadcast it to the presentation.

    The broadcast runs on a separate thread so the socket handler returns
    quickly. NOTE(review): parameter ``json`` shadows the json module;
    ``json_util.dumps`` is used for serialization instead.
    """
    u_id = json["user_id"]
    message = json["message"]
    p_id = json["p_id"]
    res = cRepo.insertMessage(u_id, message, p_id)
    # Fan the stored message out to everyone viewing this presentation.
    thread = threading.Thread(target=broadCastMessage, kwargs=dict(event="messageSent", pres_id=json["p_id"], msg=json_util.dumps(res)))
    thread.start()
@socketio.on('userIsTyping')
def userHastStartedTyping(json):
    """Notify all other presentation viewers that this user started typing."""
    u_id = json["user_id"]
    user = aRepo.retrieveUserWithOutTimeChange(user_id=u_id)
    # Broadcast on a worker thread, excluding the sender's own socket.
    thread = threading.Thread(target=broadCastMessageExceptOwn, kwargs=dict(event="userStartedTyping",pres_id=json["p_id"], msg=json_util.dumps(user), sender_id=json["user_id"]))
    thread.start()
@socketio.on('userHasStoppedTyping')
def userHasStoppedTyping(json):
    """Notify all other presentation viewers that this user stopped typing."""
    print("Der user hat mitn typen aufghlrt")
    u_id = json["user_id"]
    user = aRepo.retrieveUserWithOutTimeChange(user_id=u_id)
    # Broadcast on a worker thread, excluding the sender's own socket.
    thread = threading.Thread(target=broadCastMessageExceptOwn, kwargs=dict(event="userStoppedTyping",pres_id=json["p_id"], msg=json_util.dumps(user), sender_id=json["user_id"]))
    thread.start()
| 35.179487 | 178 | 0.754738 |
9c4cd1bf0908682aeaddf96f929c88caca627328
| 4,868 |
py
|
Python
|
StockDataTest_v3/model_H,bs1,ts1,tf100/stock_3.py
|
TrunkingW/Deep_Learning
|
85add772a2c613f8034985872eb3e44683d47bee
|
[
"MIT"
] | null | null | null |
StockDataTest_v3/model_H,bs1,ts1,tf100/stock_3.py
|
TrunkingW/Deep_Learning
|
85add772a2c613f8034985872eb3e44683d47bee
|
[
"MIT"
] | null | null | null |
StockDataTest_v3/model_H,bs1,ts1,tf100/stock_3.py
|
TrunkingW/Deep_Learning
|
85add772a2c613f8034985872eb3e44683d47bee
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
rnn_unit=10       # hidden state size of the LSTM cell
input_size=7      # number of input features per time step
output_size=1     # single regression target per time step
lr=0.0006         # learning rate for the Adam optimizer

# Load raw stock data; CSV columns 2..9 hold the 7 features + target.
f=open('test.csv')
df=pd.read_csv(f)
data=df.iloc[:,2:10].values
def get_train_data(batch_size=60,time_step=20,train_begin=0,train_end=3627):
    """Build z-score-normalized sliding-window samples for training.

    Returns (batch_index, train_x, train_y): batch_index holds the start
    offsets of each batch (plus the total sample count at the end),
    train_x windows of the 7 feature columns, train_y the target
    column (column 7) for the same windows.
    """
    batch_index=[]
    data_train=data[train_begin:train_end]
    # Normalize each column to zero mean / unit variance over the train span.
    normalized_train_data=(data_train-np.mean(data_train,axis=0))/np.std(data_train,axis=0)
    train_x,train_y=[],[]
    for i in range(len(normalized_train_data)-time_step):
        if i % batch_size==0:
            batch_index.append(i)
        x=normalized_train_data[i:i+time_step,:7]
        y=normalized_train_data[i:i+time_step,7,np.newaxis]
        train_x.append(x.tolist())
        train_y.append(y.tolist())
    batch_index.append((len(normalized_train_data)-time_step))
    return batch_index,train_x,train_y
def get_test_data(time_step=20,test_begin=3627):
    """Build normalized test windows plus the per-column mean/std used.

    Returns (mean, std, test_x, test_y); test_x holds non-overlapping
    windows of the 7 feature columns (plus a possibly-shorter trailing
    remainder), test_y the flattened target column (column 7).
    """
    data_test=data[test_begin:]
    mean=np.mean(data_test,axis=0)
    std=np.std(data_test,axis=0)
    # NOTE(review): normalization uses *test*-set statistics, which
    # differs from the train-set statistics used in get_train_data.
    normalized_test_data=(data_test-mean)/std
    #size=(len(normalized_test_data)+time_step-1)//time_step
    size=(len(normalized_test_data)+time_step)//time_step
    test_x,test_y=[],[]
    for i in range(size-1):
        x=normalized_test_data[i*time_step:(i+1)*time_step,:7]
        y=normalized_test_data[i*time_step:(i+1)*time_step,7]
        test_x.append(x.tolist())
        test_y.extend(y)
    # Append the trailing remainder (may be empty) as a final sample.
    test_x.append((normalized_test_data[(i+1)*time_step:,:7]).tolist())
    test_y.extend((normalized_test_data[(i+1)*time_step:,7]).tolist())
    return mean,std,test_x,test_y
# Input/output projection weights and biases wrapped around the LSTM cell.
weights={
    'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),
    'out':tf.Variable(tf.random_normal([rnn_unit,1]))
}
biases={
    'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),
    'out':tf.Variable(tf.constant(0.1,shape=[1,]))
}
def lstm(X):
    """Single-layer LSTM regression network (TF1 graph mode).

    X: float tensor of shape (batch, time_step, input_size).
    Returns (pred, final_states) where pred has shape
    (batch * time_step, 1).
    """
    batch_size=tf.shape(X)[0]
    time_step=tf.shape(X)[1]
    w_in=weights['in']
    b_in=biases['in']
    # Project inputs to the hidden size before feeding the RNN cell.
    input=tf.reshape(X,[-1,input_size])
    input_rnn=tf.matmul(input,w_in)+b_in
    input_rnn=tf.reshape(input_rnn,[-1,time_step,rnn_unit])
    cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
    init_state=cell.zero_state(batch_size,dtype=tf.float32)
    output_rnn,final_states=tf.nn.dynamic_rnn(cell, input_rnn,initial_state=init_state, dtype=tf.float32)
    # Project hidden states to the single output value per time step.
    output=tf.reshape(output_rnn,[-1,rnn_unit])
    w_out=weights['out']
    b_out=biases['out']
    pred=tf.matmul(output,w_out)+b_out
    return pred,final_states
def train_lstm(batch_size=1,time_step=1,train_begin=1,train_end=3707):
    """Train the LSTM for 101 epochs, resuming from the latest checkpoint.

    NOTE(review): the variable initializer is commented out, so a
    checkpoint must already exist in ./ or saver.restore() will fail.
    Checkpoints are written to ./stock2.model every 100 epochs.
    """
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    Y=tf.placeholder(tf.float32, shape=[None,time_step,output_size])
    batch_index,train_x,train_y=get_train_data(batch_size,time_step,train_begin,train_end)
    pred,_=lstm(X)
    # Mean squared error over all (batch * time_step) predictions.
    loss=tf.reduce_mean(tf.square(tf.reshape(pred,[-1])-tf.reshape(Y, [-1])))
    train_op=tf.train.AdamOptimizer(lr).minimize(loss)
    saver=tf.train.Saver(tf.global_variables(),max_to_keep=15)
    module_file = tf.train.latest_checkpoint('./')
    with tf.Session() as sess:
        #sess.run(tf.global_variables_initializer())
        saver.restore(sess, module_file)
        for i in range(101):
            for step in range(len(batch_index)-1):
                _,loss_=sess.run([train_op,loss],feed_dict={X:train_x[batch_index[step]:batch_index[step+1]],Y:train_y[batch_index[step]:batch_index[step+1]]})
            print(i,loss_)
            if i % 100==0 and i>1:
                print("SaveMode:",saver.save(sess,'./stock2.model',global_step=i))
#train_lstm()
def prediction(time_step=1,test_begin=3706):
    """Restore the latest checkpoint, predict on the test span, and plot
    the de-normalized predictions (blue) against actuals (red).

    Also writes both series to Output_test_predict.csv / Output_test_y.csv.
    """
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    #Y=tf.placeholder(tf.float32, shape=[None,time_step,output_size])
    mean,std,test_x,test_y=get_test_data(time_step,test_begin)
    pred,_=lstm(X)
    saver=tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        module_file = tf.train.latest_checkpoint('./')
        saver.restore(sess, module_file)
        test_predict=[]
        for step in range(len(test_x)-1):
            prob=sess.run(pred,feed_dict={X:[test_x[step]]})
            predict=prob.reshape((-1))
            test_predict.extend(predict)
        # De-normalize using column 7's (target column) statistics.
        test_y=np.array(test_y)*std[7]+mean[7]
        test_predict=np.array(test_predict)*std[7]+mean[7]
        #acc=np.average(np.abs(test_predict-test_y[:len(test_predict)])/test_y[:len(test_predict)])
        plt.figure()
        plt.plot(list(range(len(test_predict))), test_predict, color='b')
        plt.plot(list(range(len(test_y))), test_y, color='r')
        np.savetxt('Output_test_predict.csv', test_predict, delimiter=",")
        np.savetxt('Output_test_y.csv', test_y, delimiter=",")
        plt.show()


# Run prediction when the script is executed.
prediction()
| 36.059259 | 159 | 0.673583 |
92c65123a6c522968afa5a515527ab9b438ba792
| 1,706 |
py
|
Python
|
python/odml/nix_demo.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
python/odml/nix_demo.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
python/odml/nix_demo.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import nixio as nix
# Paths to the raw imaging log (CSV) and the NIX output file.
fnbase = Path.joinpath(Path.home(), 'Chaos', 'work')
fnraw = str(Path.joinpath(fnbase,
                          '_Lab_Zimmer/calcium_imaging/results/N2/urx/shift210421/20120705Pflp178GCaMP5kshift210421W7URXx2.log'))
nixfn = str(Path.joinpath(fnbase, 'ginrepos', 'demo', 'elegans_oxygen.nix'))

# Row-wise read-in of the CSV file
data = pd.read_csv(fnraw)
# Transpose to get columns
tdata = data.transpose()
# Column 0: frame index; column 5: dF/F trace
steps = tdata.values[0]
dff = tdata.values[5]

# Load the data into a fresh NIX file (overwrites any existing file)
nixfile = nix.File.open(nixfn, nix.FileMode.Overwrite)
b = nixfile.create_block(name="oxygen_shift_trials", type_="calcium_imaging")
# Use a group to structure the individual trials within a block
g = b.create_group(name="N2_URX_shift_210421_20120705", type_="trial.datacollection")

# Add frames column
da = b.create_data_array(name="20120705_frames", array_type="trial.column", data=steps)
da.label = "frames"
# Add dF/F column
da = b.create_data_array(name="20120705_df_over_f", array_type="trial.column", data=dff)
da.label = "dF/F"
# Add the second dimension, assuming equidistant frame spacing
dim = da.append_sampled_dimension(steps[1] - steps[0])
dim.label = "frames"

# Structuring our data: register both arrays with the trial group
g.data_arrays.append(b.data_arrays["20120705_frames"])
g.data_arrays.append(b.data_arrays["20120705_df_over_f"])

# Plot the dF/F trace straight from the NIX file
fig, ax = plt.subplots()
ax.plot(b.data_arrays["20120705_df_over_f"][:])
ax.set(xlabel=b.data_arrays["20120705_df_over_f"].dimensions[0].label,
       ylabel=b.data_arrays["20120705_df_over_f"].label,
       title="URX oxygen shift trial (21-04-21)")
plt.show()

nixfile.close()
| 28.915254 | 129 | 0.743259 |
13e7327b4d99b3fd39e85a80b65855147801e4af
| 173 |
py
|
Python
|
kettle_etl/test.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | null | null | null |
kettle_etl/test.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | null | null | null |
kettle_etl/test.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | 1 |
2022-02-11T04:44:37.000Z
|
2022-02-11T04:44:37.000Z
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/8/16 下午5:15
# @Author : Latent
# @Email : [email protected]
# @File : test.py
# @Software: PyCharm
# @class :
| 15.727273 | 30 | 0.595376 |
b9e161d322f05c7b5100d4c6312496ca0dd549af
| 615 |
py
|
Python
|
PINp/2014/Cheraneva A.S/task_6_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Cheraneva A.S/task_6_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Cheraneva A.S/task_6_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 6. Variant 24.
# Game: the computer picks one of the four basic horse coat colors
# and the player has to guess it.
# Cheraneva A.S.
# 25.03.2016
import random

# The four basic horse coat colors (prompted to the player below).
m = ('Вороная', 'Гнедая', 'Серая', 'Рыжая')
# BUG FIX: randint(0, 2) could never select the last color ('Рыжая')
# even though the prompt offers all four; use the full index range.
a = random.randint(0, len(m) - 1)
rand = m[a]
print('Как вы думаете, название какой из четырех основным мастей лошадей я загадал? Вороная, Гнедая, Серая или Рыжая?')
otvet = 0
while otvet != rand:
    otvet = input('Введите название масти:')
    if otvet != rand:
        print('Вы не угадали,попробуйте снова.')
    else:
        print('Вы угадали.')
input('Нажмите Enter для выхода.')
| 24.6 | 124 | 0.715447 |
dbfd89ef256e806d456f2672b2c638dc1dff7eaa
| 1,402 |
py
|
Python
|
test.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | 2 |
2015-01-12T08:23:35.000Z
|
2017-07-28T15:02:26.000Z
|
test.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | null | null | null |
test.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from scrapy.spider import BaseSpider
from scrapy.http.request import Request
from scrapy.utils.response import open_in_browser
class TestSpider(BaseSpider):
    """ Testing Spider

    Minimal Scrapy spider used for manual debugging: it dumps response
    headers and marker lines to stdout while crawling miru-navi.com.
    NOTE: Python 2 code (bare ``print`` statements).
    """
    name = 'test'
    allowed_domains = [
        'miru-navi.com',
        # 'map-fuzoku.com'
    ]
    start_urls = [
        'http://miru-navi.com',
        # 'http://map-fuzoku.com'
    ]
    # Additional pages fetched explicitly from parse().
    links = [
        'http://miru-navi.com/stores'
    ]

    def parse(self, response):
        """ main

        Entry callback: prints debug markers, then schedules each URL in
        self.links with spider_page as the callback.
        """
        # print
        # print
        # print response.headers
        # open_in_browser(response)
        print response.headers
        print 'parse'
        print 'parse'
        print 'parse'
        print 'parse'
        print 'parse'
        print 'parse'
        print 'parse'
        print 'parse'
        for link in self.links:
            yield Request(link, method="GET", callback=self.spider_page)

    def spider_page(self, response):
        # Secondary callback: only prints debug markers for the fetched page.
        print response.headers
        print
        print
        print
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        print 'spider_page'
        # print response.headers
        # open_in_browser(response)
16475993a8a522232fa5f5db3024d410f9c3de75
| 2,964 |
py
|
Python
|
scripts/calendar_view_gui/utils/imgLib.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 17 |
2021-01-18T07:27:01.000Z
|
2022-03-10T12:26:21.000Z
|
scripts/calendar_view_gui/utils/imgLib.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 4 |
2021-04-29T11:20:44.000Z
|
2021-12-06T10:19:17.000Z
|
scripts/calendar_view_gui/utils/imgLib.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 47 |
2021-01-21T08:25:22.000Z
|
2022-03-21T14:28:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Guido Lemoine
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
# Version : 1.0 - 2020-04-05
"""Simple image processing routines for client side chip handling"""
import rasterio
import numpy as np
from matplotlib import cm
def normalizedDifference(band0, band1, output):
    # Calculates the normalized difference between 2 input bands:
    # (band0 - band1) / (band0 + band1), e.g. NDVI for NIR/red inputs.
    # band0, band1 - paths to rasterio readable single-band rasters
    # output - path for the rasterio writable float32 result
    with rasterio.open(band0) as b0:
        band_0 = b0.read(1)
        kwargs = b0.meta
    with rasterio.open(band1) as b1:
        band_1 = b1.read(1)
    # Allow division by zero (zero-sum pixels become nan/inf silently)
    np.seterr(divide='ignore', invalid='ignore')
    # Calculate the normalized difference
    nd = (band_0.astype(float) - band_1.astype(float)) / (band_0 + band_1)
    # Set spatial characteristics of the output object to mirror the input
    kwargs.update(
        dtype=rasterio.float32,
        count = 1)
    # Create the file
    with rasterio.open(output, 'w', **kwargs) as dst:
        dst.write_band(1, nd.astype(rasterio.float32))
    return True
def dBratio(band0, band1, output):
    # Calculates the ratio between 2 input bands and dB scales that:
    # 10 * log10(band0 / band1)
    # band0, band1 - paths to rasterio readable single-band rasters
    # output - path for the rasterio writable float32 result
    with rasterio.open(band0) as b0:
        band_0 = b0.read(1)
        kwargs = b0.meta
    with rasterio.open(band1) as b1:
        band_1 = b1.read(1)
    # Allow division by zero (zero/negative inputs become nan/inf silently)
    np.seterr(divide='ignore', invalid='ignore')
    # Calculate the dB-scaled band ratio (comment fixed: this is not NDVI)
    ratio = 10.0*np.log10(band_0.astype(float)/band_1.astype(float))
    # Set spatial characteristics of the output object to mirror the input
    kwargs.update(
        dtype=rasterio.float32,
        count = 1)
    # Create the file
    with rasterio.open(output, 'w', **kwargs) as dst:
        dst.write_band(1, ratio.astype(rasterio.float32))
    return True
def scaled_palette(band, min, max, matplot_cmap):
    # Scale a single-band raster to uint8 (clipping values between *min*
    # and *max*) and write it out with a matplotlib colormap attached.
    # band         - path to a rasterio readable single-band raster
    # min, max     - values mapped to 0 and 255 respectively (clipped)
    # matplot_cmap - name of a matplotlib colormap, see:
    #   https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
    # Output is written next to the input as '<band>_scaled.tif'.
    # (Cleanup: removed a stray debug print(meta) and a redundant second
    # astype(np.uint8) -- the cmap array is already uint8.)
    cmap = cm.get_cmap(matplot_cmap, 256)
    cmap = (255.0 * cmap(range(256))).astype(np.uint8)
    # Build a rasterio colormap: palette index -> (R, G, B, A) tuple.
    lut = {i: tuple(cmap[i]) for i in range(256)}
    with rasterio.Env():
        with rasterio.open(band) as src:
            image = src.read(1)
            meta = src.meta
        image = np.clip(255 * (image - min) / (max - min), 0, 255).astype(np.uint8)
        meta['dtype'] = rasterio.uint8
        with rasterio.open(band.replace('.tif', '_scaled.tif'), 'w', **meta) as dst:
            dst.write(image, indexes=1)
            dst.write_colormap(1, lut)
| 33.681818 | 88 | 0.635628 |
16796e3a975cd0bc7dcc860b0f5e52c8a4ea9d9c
| 1,795 |
py
|
Python
|
extraction/links_in_context/extract_linkcontext.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
extraction/links_in_context/extract_linkcontext.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
extraction/links_in_context/extract_linkcontext.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
import re
from typing import List
from bs4 import BeautifulSoup
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType, FloatType
def normalize_whitespace(text: str) -> str:
    """Trim *text* and collapse each internal whitespace run to one space."""
    return re.sub(r"\s+", " ", text.strip())
def extract_links_in_context(html: str) -> List[List[str]]:
    """Extract (href, anchor text, context) for every link in *html*.

    Context is the link's own text when it sits among sibling links
    (navigation lists), otherwise the parent element's text, trimmed to a
    window of roughly +/-500 characters around the anchor.
    NOTE(review): tuples are appended although the annotation says
    List[List[str]] -- Spark coerces them, but the hint is imprecise.
    """
    parsed_html = BeautifulSoup(html, features="html.parser")
    links = parsed_html.find_all("a", attrs={"href": True})
    results = []
    for link in links:
        dest = link.attrs["href"]
        anchor = link.text
        # Sibling <a> tags before/after this one indicate a link list.
        siblings = link.find_next_siblings(
            "a", attrs={"href": True}
        ) + link.find_previous_siblings("a", attrs={"href": True})
        if len(siblings) > 1:
            context = link.text
        else:
            context = link.parent.text
        if len(context) > 1000:
            # Keep a +/-500 character window centered on the anchor text.
            anchor_index = context.index(link.text)
            start_index = max(0, anchor_index - 500)
            end_index = min(len(context) - 1, anchor_index + 500)
            context = context[start_index:end_index]
        anchor = normalize_whitespace(anchor)
        context = normalize_whitespace(context)
        results.append((dest, anchor, context))
    return results
# Spark UDF wrapper: yields [[dest, anchor, context], ...] per input page.
extract_links_in_context_udf = udf(
    extract_links_in_context, ArrayType(ArrayType(StringType()))
)
def normalize_url(url: str) -> str:
    """Lowercase *url*, dropping scheme, a leading 'www.' and any
    trailing slash."""
    cleaned = url.strip().lower()
    cleaned = re.sub(r"^(https?://)?(www\.)?", "", cleaned)
    return re.sub(r"/$", "", cleaned)
# Spark UDF wrapper for URL normalization.
normalize_url_udf = udf(normalize_url, StringType())
def get_inv_path_depth(url: str) -> float:
path_depth = url.count("/") + 1
return float(1 / path_depth)
# Spark UDF wrapper for inverse path depth.
get_inv_path_depth_udf = udf(get_inv_path_depth, FloatType())
| 28.951613 | 69 | 0.627855 |
169e294f0dad2ad26c5671f9be0e739e673b63c8
| 9,575 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/rabbitmq_queue.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/rabbitmq_queue.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/rabbitmq_queue.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Manuel Sousa <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: Manuel Sousa (@manuel-sousa)
short_description: Manage rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue
required: true
state:
description:
- Whether the queue should be present or absent
choices: [ "present", "absent" ]
default: present
durable:
description:
- whether queue is durable or not
type: bool
default: 'yes'
auto_delete:
description:
- if the queue should delete itself after all queues/queues unbound from it
type: bool
default: 'no'
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
max_priority:
description:
- Maximum number of priority levels for the queue to support.
- If not set, the queue will not support message priorities.
- Larger numbers indicate higher priority.
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
default: {}
extends_documentation_fragment:
- community.general.rabbitmq
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
'''
import json
import traceback
# Import 'requests' defensively: failure is deferred so main() can report
# a helpful error through module.fail_json instead of crashing on import.
REQUESTS_IMP_ERR = None
try:
    import requests

    HAS_REQUESTS = True
except ImportError:
    REQUESTS_IMP_ERR = traceback.format_exc()
    HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
from ansible_collections.community.general.plugins.module_utils.rabbitmq import rabbitmq_argument_spec
def main():
    """Ansible module entry point: ensure a RabbitMQ queue is present or absent.

    Talks to the RabbitMQ management HTTP API with ``requests``.  The API
    cannot modify an existing queue in place, so attribute changes on an
    existing queue fail explicitly instead of being applied silently.
    """
    # Start from the shared rabbitmq connection options and add queue options.
    argument_spec = rabbitmq_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            durable=dict(default=True, type='bool'),
            auto_delete=dict(default=False, type='bool'),
            message_ttl=dict(default=None, type='int'),
            auto_expires=dict(default=None, type='int'),
            max_length=dict(default=None, type='int'),
            dead_letter_exchange=dict(default=None, type='str'),
            dead_letter_routing_key=dict(default=None, type='str'),
            arguments=dict(default=dict(), type='dict'),
            max_priority=dict(default=None, type='int')
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Management-API endpoint for this queue; the vhost is URL-quoted
    # (quote with safe='' so '/' in the default vhost becomes %2F).
    url = "%s://%s:%s/api/queues/%s/%s" % (
        module.params['login_protocol'],
        module.params['login_host'],
        module.params['login_port'],
        urllib_parse.quote(module.params['vhost'], ''),
        module.params['name']
    )

    if not HAS_REQUESTS:
        module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)

    result = dict(changed=False, name=module.params['name'])

    # Check if queue already exists
    r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
                     verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))

    if r.status_code == 200:
        queue_exists = True
        response = r.json()
    elif r.status_code == 404:
        queue_exists = False
        response = r.text
    else:
        module.fail_json(
            msg="Invalid response from RESTAPI when trying to check if queue exists",
            details=r.text
        )

    if module.params['state'] == 'present':
        change_required = not queue_exists
    else:
        change_required = queue_exists

    # Check if attributes change on existing queue.  Each optional argument is
    # "unchanged" either when the server value matches, or when neither side
    # defines it (key absent AND module param is None).
    if not change_required and r.status_code == 200 and module.params['state'] == 'present':
        if not (
            response['durable'] == module.params['durable'] and
            response['auto_delete'] == module.params['auto_delete'] and
            (
                ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
                ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
            ) and
            (
                ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
                ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
            ) and
            (
                ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
                ('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
            ) and
            (
                ('x-dead-letter-exchange' in response['arguments'] and
                 response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
                ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
            ) and
            (
                ('x-dead-letter-routing-key' in response['arguments'] and
                 response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
                ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
            ) and
            (
                ('x-max-priority' in response['arguments'] and
                 response['arguments']['x-max-priority'] == module.params['max_priority']) or
                ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
            )
        ):
            module.fail_json(
                msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
            )

    # Copy parameters to arguments as used by RabbitMQ
    for k, v in {
        'message_ttl': 'x-message-ttl',
        'auto_expires': 'x-expires',
        'max_length': 'x-max-length',
        'dead_letter_exchange': 'x-dead-letter-exchange',
        'dead_letter_routing_key': 'x-dead-letter-routing-key',
        'max_priority': 'x-max-priority'
    }.items():
        if module.params[k] is not None:
            module.params['arguments'][v] = module.params[k]

    # Exit if check_mode
    if module.check_mode:
        result['changed'] = change_required
        result['details'] = response
        result['arguments'] = module.params['arguments']
        module.exit_json(**result)

    # Do changes
    if change_required:
        if module.params['state'] == 'present':
            # Create the queue via PUT.
            r = requests.put(
                url,
                auth=(module.params['login_user'], module.params['login_password']),
                headers={"content-type": "application/json"},
                data=json.dumps({
                    "durable": module.params['durable'],
                    "auto_delete": module.params['auto_delete'],
                    "arguments": module.params['arguments']
                }),
                verify=module.params['ca_cert'],
                cert=(module.params['client_cert'], module.params['client_key'])
            )
        elif module.params['state'] == 'absent':
            r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
                                verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))

        # RabbitMQ 3.6.7 changed this response code from 204 to 201
        if r.status_code == 204 or r.status_code == 201:
            result['changed'] = True
            module.exit_json(**result)
        else:
            module.fail_json(
                msg="Error creating queue",
                status=r.status_code,
                details=r.text
            )
    else:
        # Queue already in the desired state: report no change.
        module.exit_json(
            changed=False,
            name=module.params['name']
        )


if __name__ == '__main__':
    main()
| 37.256809 | 136 | 0.60658 |
4c6e35772bca6b8de77b665c9d373974f2dec0a4
| 9,075 |
py
|
Python
|
helper/segmentation/offline_augmentation.py
|
fishial/Object-Detection-Model
|
4792f65ea785156a8e240d9cdbbc0c9d013ea0bb
|
[
"CC0-1.0"
] | 1 |
2022-01-03T14:00:17.000Z
|
2022-01-03T14:00:17.000Z
|
helper/segmentation/offline_augmentation.py
|
fishial/Object-Detection-Model
|
4792f65ea785156a8e240d9cdbbc0c9d013ea0bb
|
[
"CC0-1.0"
] | null | null | null |
helper/segmentation/offline_augmentation.py
|
fishial/Object-Detection-Model
|
4792f65ea785156a8e240d9cdbbc0c9d013ea0bb
|
[
"CC0-1.0"
] | 1 |
2021-12-21T09:50:53.000Z
|
2021-12-21T09:50:53.000Z
|
import sys
#Change path specificly to your directories
sys.path.insert(1, '/home/codahead/Fishial/FishialReaserch')
import os
import cv2
import json
import copy
import warnings
import imgaug as ia
import imgaug.augmenters as iaa
from shutil import copyfile
from imgaug.augmentables.polys import Polygon
from tqdm import tqdm
from module.classification_package.src.utils import save_json
# The script will create an augumentation images, according the loader(transformer).
# That script we use if need us to use 'offline' data augumentation.
def get_max_id(data):
    """Return the largest ``id`` found among the image records of *data*.

    Records without an ``id`` key are ignored.  Raises ``ValueError`` (from
    ``max``) when no record carries an ``id`` at all.
    """
    return max(image['id'] for image in data['images'] if 'id' in image)
def convert_aug_pol_to_two_array(poi):
    """Flatten augmented polygons into COCO-style ``[x1, y1, x2, y2, ...]`` lists.

    Each vertex is truncated to an int and clamped at zero; vertices that end
    up on either axis (x == 0 or y == 0) are dropped, which matches the
    original ``(x and y) == 0`` filter for non-negative ints.
    """
    flattened_polygons = []
    for polygon in poi:
        coords = []
        for vertex in polygon:
            px = max(int(vertex[0]), 0)
            py = max(int(vertex[1]), 0)
            if px == 0 or py == 0:
                continue
            coords.extend((px, py))
        flattened_polygons.append(coords)
    return flattened_polygons
# import data
path_to_json = r'fishial_collection/fishial_collection_correct.json'
# path to augmented dataset
path_to_aug_dataset = r'fishial_collection/Train-aug'
img_path_main = r'fishial_collection/Train'
# count of augmented img
cnt_aug = 5

# Stand-alone augmenters used for specific variants in the loop below.
aug2 = iaa.JpegCompression(compression=(70, 99))
aug3 = iaa.Affine(rotate=(-45, 45))
aug4 = iaa.AdditiveGaussianNoise(scale=0.08 * 255, per_channel=True)  # NOTE(review): defined but never applied below
sometimes = lambda aug: iaa.Sometimes(0.5, aug)  # apply the wrapped augmenter to ~50% of images
seq = iaa.Sequential(
    [
        # apply the following augmenters to most images
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images
        iaa.Flipud(0.2),  # vertically flip 20% of all images
        # crop images by -5% to 10% of their height/width
        sometimes(iaa.CropAndPad(
            percent=(-0.05, 0.1),
            pad_mode=ia.ALL,
            pad_cval=(0, 255)
        )),
        sometimes(iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},  # scale images to 80-120% of their size, individually per axis
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},  # translate by -20 to +20 percent (per axis)
            rotate=(-45, 45),  # rotate by -45 to +45 degrees
            shear=(-16, 16),  # shear by -16 to +16 degrees
            order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
            cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
            mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
        )),
        # execute 0 to 5 of the following (less important) augmenters per image
        # don't execute all of them, as that would often be way too strong
        iaa.SomeOf((0, 5),
                   [
                       sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
                       # convert images into their superpixel representation
                       iaa.OneOf([
                           iaa.GaussianBlur((0, 3.0)),  # blur images with a sigma between 0 and 3.0
                           iaa.AverageBlur(k=(2, 7)),  # blur image using local means with kernel sizes between 2 and 7
                           iaa.MedianBlur(k=(3, 11)),
                           # blur image using local medians with kernel sizes between 2 and 7
                       ]),
                       iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                       iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),  # emboss images
                       # search either for all edges or for directed edges,
                       # blend the result with the original image using a blobby mask
                       iaa.SimplexNoiseAlpha(iaa.OneOf([
                           iaa.EdgeDetect(alpha=(0.5, 1.0)),
                           iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                       ])),
                       iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                       # add gaussian noise to images
                       iaa.OneOf([
                           iaa.Dropout((0.01, 0.1), per_channel=0.5),  # randomly remove up to 10% of the pixels
                           iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                       ]),
                       iaa.Invert(0.05, per_channel=True),  # invert color channels
                       iaa.Add((-10, 10), per_channel=0.5),
                       # change brightness of images (by -10 to 10 of original value)
                       iaa.AddToHueAndSaturation((-20, 20)),  # change hue and saturation
                       # either change the brightness of the whole image (sometimes
                       # per channel) or change the brightness of subareas
                       iaa.OneOf([
                           iaa.Multiply((0.5, 1.5), per_channel=0.5),
                           iaa.FrequencyNoiseAlpha(
                               exponent=(-4, 0),
                               first=iaa.Multiply((0.5, 1.5), per_channel=True),
                               second=iaa.LinearContrast((0.5, 2.0))
                           )
                       ]),
                       iaa.LinearContrast((0.5, 2.0), per_channel=0.5),  # improve or worsen the contrast
                       iaa.Grayscale(alpha=(0.0, 1.0)),
                       sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
                       # move pixels locally around (with random strengths)
                       sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),  # sometimes move parts of the image around
                       sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                   ],
                   random_order=True
                   )
    ],
    random_order=True
)

# Load the COCO-style annotation file; the augmented records are appended to
# a deep copy so the original dict stays intact while iterating over it.
json_tmp = json.load(open(path_to_json))
unique_img_array = []
os.makedirs(path_to_aug_dataset, exist_ok=True)
result_dict = copy.deepcopy(json_tmp)

# Collect the category ids named "General body shape"; only those polygons
# are augmented.
bodyes_shapes_ids = []
for i in json_tmp['categories']:
    if i['name'] == 'General body shape':
        bodyes_shapes_ids.append(int(i['id']))

for img_ma in tqdm(range(len(json_tmp['images']))):
    # Only images explicitly flagged as training data get augmented.
    if 'train_data' in json_tmp['images'][img_ma]:
        if not json_tmp['images'][img_ma]['train_data']: continue
        img_main = os.path.join(img_path_main, json_tmp['images'][img_ma]['file_name'])
        dst_path = os.path.join(path_to_aug_dataset, os.path.basename(json_tmp['images'][img_ma]['file_name']))
        # Keep an unaugmented copy of the source image in the output folder.
        copyfile(img_main, dst_path)
        image = cv2.imread(dst_path)
        # Gather every body-shape polygon attached to this image.
        single_img = []
        for ann in json_tmp['annotations']:
            if 'segmentation' in ann and ann['image_id'] == json_tmp['images'][img_ma]['id'] and ann[
                    'category_id'] in bodyes_shapes_ids:
                single_img.append(Polygon(ann['segmentation']))
        psoi = ia.PolygonsOnImage(single_img, shape=image.shape)
        for cnt_aug_idx in range(cnt_aug):
            try:
                # Promote imgaug warnings to errors so a degenerate polygon
                # transform skips this variant instead of writing bad labels.
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter("error")
                    # Variant 0 and 3+ use the full pipeline; 1 and 2 use the
                    # single-purpose augmenters.
                    if cnt_aug_idx == 0:
                        image_aug, psoi_aug = seq(image=image, polygons=psoi)
                    elif cnt_aug_idx == 1:
                        image_aug, psoi_aug = aug2(image=image, polygons=psoi)
                    elif cnt_aug_idx == 2:
                        image_aug, psoi_aug = aug3(image=image, polygons=psoi)
                    else:
                        image_aug, psoi_aug = seq(image=image, polygons=psoi)
                    array_of_polygon = convert_aug_pol_to_two_array(psoi_aug)
                    # save aug image
                    title, ext = os.path.splitext(os.path.basename(img_main))
                    if ext == '':
                        ext = '.png'
                    aug_image_name = '{}_aug_{}{}'.format(title, cnt_aug_idx, ext)
                    cv2.imwrite(os.path.join(path_to_aug_dataset, aug_image_name), image_aug)
                    # save new image record to json
                    # NOTE(review): get_max_id returns the current maximum, so
                    # the new record reuses an existing id — confirm intent.
                    tmp_img_dict = json_tmp['images'][img_ma]
                    tmp_img_dict['id'] = get_max_id(result_dict)
                    tmp_img_dict['file_name'] = aug_image_name
                    result_dict['images'].append(tmp_img_dict)
                    for idx_single_polygon, single_converted_poly in enumerate(array_of_polygon):
                        # Skip degenerate polygons (< 5 vertices after filtering).
                        if len(single_converted_poly) < 10: continue
                        tmp_poly = {
                            'segmentation': single_converted_poly,
                            'image_id': tmp_img_dict['id'],
                            'category_id': 1
                        }
                        result_dict['annotations'].append(tmp_poly)
            except Exception:
                print("Error ! name file: {} ".format(img_main))
                continue
save_json(result_dict, os.path.join('fishial_collection', 'fishial_collection_correct_aug.json'))
| 45.833333 | 119 | 0.556915 |
d5fe7e57f5e807d278e8fa87e8f07a9c3c185106
| 684 |
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-006/ph-6.24-break-continue.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-006/ph-6.24-break-continue.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-006/ph-6.24-break-continue.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
# Console add/sub calculator.  The outer loop reads a pair of integers; the
# inner loop keeps prompting for an operation until a valid one is entered,
# or until the user types "quit", which ends the whole program.
terminate_program = False
while not terminate_program:
    number1 = input("Please enter a number: ")
    number1 = int(number1)
    number2 = input("Please enter another number: ")
    number2 = int(number2)
    while True:
        operation = input("Please enter add/sub or quit to exit: ")
        if operation == "quit":
            # Flag the outer loop to stop as well.
            terminate_program = True
            break
        if operation not in ["add", "sub"]:
            print("Unknown operation!")
            continue
        if operation == "add":
            print("Result is", number1 + number2)
            break
        if operation == "sub":
            print("Result is", number1 - number2)
            break
914abb8f64ae36bbb92d74a3ed34e1337f556564
| 2,576 |
py
|
Python
|
begin-python/cp2/chapter2.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | 3 |
2021-08-04T07:59:48.000Z
|
2022-03-26T23:58:17.000Z
|
begin-python/cp2/chapter2.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | null | null | null |
begin-python/cp2/chapter2.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf8 -*-
# ## Chapter 2: lists and tuples  (Python 2 tutorial script)
# ### 2.1 Operations on sequences
fourth = raw_input('Year: ')[3]
print fourth
# Slicing
number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
partial = number[2:-2]
print partial
print number[-3:]
# Copying a sequence (a full slice makes a shallow copy)
anotherNumber = number[:]
print anotherNumber
# Step size
print number[::2]
# Adding sequences together
number2 = [1, 2, 3] + [4, 5, 6]
# Multiplying a sequence
str1 = 'python ' * 4
print str1
print [10] * 10
print [None] * 10
# Membership tests
permissions = 'rw'
print 'w' in permissions
user = ['jocs', 'ransixi']
print 'jocs' in user
database = [
    ['albert', '1234'],
    ['dilert', '4242'],
    ['smith', '7524'],
    ['jocs', '29']
]
username = raw_input('User name: ')
pin = raw_input('User pin: ')
if [username, pin] in database: print 'Access granted'
# Using the len/max/min functions
number3 = [1, 2, 3, 4, 5]
print len(number3)
print max(number3)
print min(number3)
hello = 'hello world'
list1 = list(hello)
print list1
print '*'.join(list1)
# Basic list operations
# Deleting an element
names = ['jocs', 'ransixi']
del names[1]
print names
# Slice assignment
perl = list('perl')
perl[2:] = list('python')
print perl
# Adding and removing elements through slices
js = list('javascript')
js[1:1] = list('java')
print js
js[1:5] = []
print js
# #### 2.3.3 List methods
# 1. `append` adds a new object to the end of a list:
lst = [1, 2, 3]
lst.append(4)
print lst
# 2. `count` counts how many times an element occurs in a list:
lis = [1, 2, 3, 4, 6, 5, 3]
print lis.count(3)
# 3. `extend` appends all values of another sequence at once, extending the original list:
a = [1, 2, 3]
b = [4, 5, 6]
a.extend(b)
print a
# 4. `index` finds the index of the first matching occurrence of a value:
knights = ['we', 'are', 'the', 'knight', 'who', 'say', 'ni']
print knights.index('who')
# 5. `insert` inserts an object into a list:
number = [1, 2, 3, 4, 5]
number.insert(3, 'who')
print number
# 6. `pop` removes one element (the last one by default) and returns its value; the argument is an index
x = [1, 2, 3, 4]
print x.pop(3)
print x
# 7. `remove` removes the first matching occurrence of a value from the list
x = [1, 2, 3, 4, 6, 'as', 4]
x.remove('as')
print x
# 8. `reverse` stores the list elements in reverse order
x = [1, 2, 3]
x.reverse()
# 9. `sort` sorts the list in place — *it changes the original list*; `sorted` returns a new sorted list and does not touch the original
x = [7, 5, 3, 1]
x.sort()
# 10. Advanced sorting (custom key function)
x = ['example1.py', 'printBox.py', 'xx']
x.sort(key=len)
print x
# ### Tuples: immutable sequences
# Tuples are sequences just like lists; the only difference is that tuples cannot be modified.  Tuples are written with parentheses.
# 1. The tuple function converts a list into a tuple
x = [1, 2, 3]
print tuple(x)
print tuple('abc')
# **Exercise** draw a centered box around a piece of text
sentence = raw_input('Whatever you input: ')
screenWidth = 80
textWidth = len(sentence)
boxWidth = textWidth + 6
paddingWidth = (screenWidth - boxWidth) // 2
print '-' * screenWidth
print ' ' * paddingWidth + '|' + ' ' * boxWidth + '|'
print ' ' * paddingWidth + '| ' + sentence + ' |'
print ' ' * paddingWidth + '|' + ' ' * boxWidth + '|'
print '-' * screenWidth
e68e200b3481c35e3c4c48c061b28b73040697f4
| 1,580 |
py
|
Python
|
ANN/e01/e01.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 10 |
2020-12-08T20:18:15.000Z
|
2021-06-07T20:00:07.000Z
|
ANN/e01/e01.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2 |
2021-06-28T03:42:13.000Z
|
2021-06-28T16:53:13.000Z
|
ANN/e01/e01.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2 |
2021-01-14T19:59:20.000Z
|
2021-06-15T11:53:21.000Z
|
#########################
# Vinicius Gasparini #
# Análise Numérica #
# Exercicio 01 #
#########################
def f(x):
    """Target function for the root search: f(x) = x^5 - 4x - 3."""
    quintic = x ** 5
    return quintic - 4 * x - 3
def verificacao_bolzano(a, b):
    """Bolzano / intermediate-value check on [a, b].

    Returns True iff f(a) and f(b) have opposite signs, i.e. a root is
    guaranteed inside the interval.

    Fix: reuse the already computed fa and fb instead of evaluating
    f(a) and f(b) a second time for the sign test.
    """
    print("*** Verificação Bolzano ***")
    fa = f(a)
    fb = f(b)
    print("f(a)= %.4f\nf(b)= %.4f" % (fa, fb))
    if fa * fb < 0:
        print("Como a f(a)*f(b) < 0 ∃ x | f(x) = 0")
        return True
    print("Como a f(a)*f(b) >= 0 ∄ x | f(x) = 0")
    return False
def bissecao(a, b, max_iter):
    """Bisection method: run exactly *max_iter* iterations on [a, b].

    Prints each midpoint and its function value, and returns the last
    midpoint as the root estimate.  Assumes f(a) and f(b) bracket a root.
    """
    print("\n*** Método da Bisseção ***")
    print("Procurando uma raiz no intervalo [%.3f,%.3f]" % (a, b))
    print("Iteração | (x , y)")
    fa = f(a)
    for i in range(max_iter):
        # Midpoint written as a + (b-a)/2, which is numerically safer
        # than (a+b)/2.
        p = a + (b - a) / 2
        print("%d | ( %.6f , %.6f )" % (i + 1, p, f(p)))
        fp = f(p)
        if fa * fp > 0:
            # Root lies in [p, b]: move the left endpoint.
            a = p
            fa = fp
        else:
            b = p
    return p
def bissecao2(a, b, epsilon):
    """Count how many bisection iterations it takes until the half-interval
    (b - a) / 2 drops below *epsilon*.

    Returns the iteration count (starting from 1), not the root itself.
    """
    cont = 1
    fa = f(a)
    while (b - a) / 2 >= epsilon:
        p = a + (b - a) / 2
        fp = f(p)
        if fa * fp > 0:
            a = p
            fa = fp
        else:
            b = p
        cont += 1
    return cont
# Problem parameters: bracket [a, b], fixed iteration count, and tolerance.
a = 0.646
b = 2.431
max_iter = 8
epsilon = 10 ** (-14)

if __name__ == "__main__":
    # Only run the bisection when Bolzano's condition guarantees a root.
    if verificacao_bolzano(a, b):
        raiz = bissecao(a, b, max_iter)
        cont = bissecao2(a, b, epsilon)
        print("\nRaiz encontrada após %d iterações = %.6f" % (max_iter, raiz))
        print("Iterações para erro menor que 10e-14 = %d" % cont)
    else:
        print("O intervalo não possui raiz")
e6987fccce65bb6bf364810e196af09800ac5c5c
| 402 |
py
|
Python
|
Topics/Sorting/Insertion-Sort/insertion-sort.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Topics/Sorting/Insertion-Sort/insertion-sort.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Topics/Sorting/Insertion-Sort/insertion-sort.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def insertion_sort(a: list) -> list:
    """Return a sorted copy of *a* using insertion sort; *a* is not modified.

    Each element is moved left by adjacent swaps until it sits after a
    smaller-or-equal neighbour.
    """
    out = a.copy()
    for i in range(1, len(out)):
        j = i
        while j > 0 and out[j - 1] > out[j]:
            out[j - 1], out[j] = out[j], out[j - 1]
            j -= 1
    return out
if __name__ == "__main__":
    # Demo: sort ten random ints and show the original list is untouched.
    from random import randint
    nums = [randint(1, 50) for _ in range(10)]
    print(nums)
    print(insertion_sort(nums))
    print(nums)
5db44633b825bf93c9fa9958fcc315608dc78996
| 5,441 |
py
|
Python
|
research/hpc/pinns/src/Schrodinger/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/hpc/pinns/src/Schrodinger/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/hpc/pinns/src/Schrodinger/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the PINNs network for the Schrodinger equation."""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Parameter, Tensor, nn, ops
from mindspore.common.initializer import TruncatedNormal, Zero, initializer
from mindspore.ops import constexpr
@constexpr
def _generate_ones(batch_size):
    """Constant (batch_size, 1) float32 tensor of ones.

    @constexpr evaluates this at graph-compile time, so the tensor is baked
    into the computation graph rather than built on every forward pass.
    """
    arr = np.ones((batch_size, 1), np.float32)
    return Tensor(arr, mstype.float32)
@constexpr
def _generate_zeros(batch_size):
    """Constant (batch_size, 1) float32 tensor of zeros (graph-compile time)."""
    arr = np.zeros((batch_size, 1), np.float32)
    return Tensor(arr, mstype.float32)
class neural_net(nn.Cell):
    """
    Neural net to fit the wave function.

    A 5-layer fully connected network with tanh activations.  Inputs (x, t)
    are rescaled to [-1, 1] using the domain bounds before the first layer,
    and the final layer produces two outputs (real and imaginary parts).

    Args:
        layers (list(int)): num of neurons for each layer
        lb (np.array): lower bound (x, t) of domain
        ub (np.array): upper bound (x, t) of domain
    """
    def __init__(self, layers, lb, ub):
        super(neural_net, self).__init__()
        self.layers = layers
        self.concat = ops.Concat(axis=1)
        self.lb = Tensor(lb, mstype.float32)
        self.ub = Tensor(ub, mstype.float32)

        self.tanh = ops.Tanh()
        self.add = ops.Add()
        self.matmul = ops.MatMul()

        # Weights and biases are declared individually so each is a named
        # Parameter ('w0'..'w4', 'b0'..'b4') visible to the optimizer.
        self.w0 = self._init_weight_xavier(0)
        self.b0 = self._init_biase(0)
        self.w1 = self._init_weight_xavier(1)
        self.b1 = self._init_biase(1)
        self.w2 = self._init_weight_xavier(2)
        self.b2 = self._init_biase(2)
        self.w3 = self._init_weight_xavier(3)
        self.b3 = self._init_biase(3)
        self.w4 = self._init_weight_xavier(4)
        self.b4 = self._init_biase(4)

    def construct(self, x, t):
        """Forward propagation; returns the two output columns (u, v)."""
        X = self.concat((x, t))
        # Rescale inputs into [-1, 1] over the domain [lb, ub].
        X = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0

        X = self.tanh(self.add(self.matmul(X, self.w0), self.b0))
        X = self.tanh(self.add(self.matmul(X, self.w1), self.b1))
        X = self.tanh(self.add(self.matmul(X, self.w2), self.b2))
        X = self.tanh(self.add(self.matmul(X, self.w3), self.b3))
        X = self.add(self.matmul(X, self.w4), self.b4)  # no activation on output

        return X[:, 0:1], X[:, 1:2]

    def _init_weight_xavier(self, layer):
        """
        Initialize weight for the ith layer.

        Xavier/Glorot scheme: truncated normal with std sqrt(2/(in+out)).
        """
        in_dim = self.layers[layer]
        out_dim = self.layers[layer+1]
        std = np.sqrt(2/(in_dim + out_dim))
        name = 'w' + str(layer)
        return Parameter(default_input=initializer(TruncatedNormal(std), [in_dim, out_dim], mstype.float32),
                         name=name, requires_grad=True)

    def _init_biase(self, layer):
        """
        Initialize biase for the ith layer (zeros).
        """
        name = 'b' + str(layer)
        return Parameter(default_input=initializer(Zero(), self.layers[layer+1], mstype.float32),
                         name=name, requires_grad=True)
class Grad_1(nn.Cell):
    """
    Net has 2 inputs and 2 outputs. Using the first output to compute gradient.

    The sensitivity pair (ones, zeros) selects only the first output, so the
    returned tuple is (d(out1)/dx, d(out1)/dt).
    """
    def __init__(self, net):
        super(Grad_1, self).__init__()
        self.net = net
        self.grad = ops.GradOperation(get_all=True, sens_param=True)

    def construct(self, x, t):
        # sens = (1, 0): gradient of the first network output only.
        sens_1 = _generate_ones(x.shape[0])
        sens_2 = _generate_zeros(x.shape[0])
        return self.grad(self.net)(x, t, (sens_1, sens_2))
class Grad_2(nn.Cell):
    """
    Net has 2 inputs and 2 outputs. Using the second output to compute gradient.

    The sensitivity pair (zeros, ones) selects only the second output, so the
    returned tuple is (d(out2)/dx, d(out2)/dt).
    """
    def __init__(self, net):
        super(Grad_2, self).__init__()
        self.net = net
        self.grad = ops.GradOperation(get_all=True, sens_param=True)

    def construct(self, x, t):
        # sens = (0, 1): gradient of the second network output only.
        sens_1 = _generate_zeros(x.shape[0])
        sens_2 = _generate_ones(x.shape[0])
        return self.grad(self.net)(x, t, (sens_1, sens_2))
class PINNs(nn.Cell):
    """
    PINNs for the Schrodinger equation.

    Wraps the wave-function MLP plus first- and second-order derivative
    cells, and assembles the PDE residuals (fu, fv) of
    i*h_t + 0.5*h_xx + |h|^2 * h = 0 with h = u + i*v.
    """
    def __init__(self, layers, lb, ub):
        super(PINNs, self).__init__()
        self.nn = neural_net(layers, lb, ub)
        self.du = Grad_1(self.nn)   # (u_x, u_t)
        self.dv = Grad_2(self.nn)   # (v_x, v_t)
        self.dux = Grad_1(self.du)  # u_xx (x-derivative of u_x)
        self.dvx = Grad_1(self.dv)  # v_xx (x-derivative of v_x)

        self.add = ops.Add()
        self.pow = ops.Pow()
        self.mul = ops.Mul()

    def construct(self, X):
        """forward propagation: returns (u, v, u_x, v_x, fu, fv)."""
        x = X[:, 0:1]
        t = X[:, 1:2]

        u, v = self.nn(x, t)
        ux, ut = self.du(x, t)
        vx, vt = self.dv(x, t)
        uxx, _ = self.dux(x, t)
        vxx, _ = self.dvx(x, t)

        # |h|^2 = u^2 + v^2
        square_sum = self.add(self.pow(u, 2), self.pow(v, 2))

        # Imaginary-part residual: fu = u_t + 0.5*v_xx + (u^2+v^2)*v
        fu1 = self.mul(vxx, 0.5)
        fu2 = self.mul(square_sum, v)
        fu = self.add(self.add(ut, fu1), fu2)

        # Real-part residual: fv = v_t - 0.5*u_xx - (u^2+v^2)*u
        fv1 = self.mul(uxx, -0.5)
        fv2 = self.mul(square_sum, u)
        fv2 = self.mul(fv2, -1.0)
        fv = self.add(self.add(vt, fv1), fv2)

        return u, v, ux, vx, fu, fv
| 32.580838 | 108 | 0.596949 |
538dc29dcc71f698df4061690facdf881bb91c72
| 5,844 |
py
|
Python
|
mod/units/update_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 1 |
2015-06-28T15:26:52.000Z
|
2015-06-28T15:26:52.000Z
|
mod/units/update_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | null | null | null |
mod/units/update_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 6 |
2015-03-20T16:36:22.000Z
|
2021-08-28T07:58:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2014-12-11 21:49:24
# @Author : [email protected]
import tornado.web
from sqlalchemy.orm.exc import NoResultFound
from ..models.course import Course
from ..models.user import User
from ..models.gpa import Overview as GPAO, Detail as GPAD
from ..models.srtp import Overview as SRTPO, Detail as SRTPD
from get_api_return import get_api_return
TEMPLATE = u'<center><h1 style="margin-top:30%">{content}</h2></center>'
class UpdateHandler(tornado.web.RequestHandler):
    """Refreshes a user's cached SRTP / GPA / curriculum data.

    GET /<type>/<openid> pulls fresh data from the upstream API via
    ``get_api_return`` and replaces the user's cached rows in the database,
    answering with a small centered HTML status message (TEMPLATE).

    Fix: the curriculum success branch wrote the raw API payload
    (a debugging leftover) instead of the success message used by the
    other two branches; it now writes u'更新好啦' as the trailing comment
    in the original already indicated.
    """

    @property
    def db(self):
        # Shared SQLAlchemy session owned by the application.
        return self.application.db

    def get(self, type, openid):
        """Handle GET: *type* selects srtp/gpa/curriculum; *openid* the user."""
        try:
            db = self.db
            user = self.db.query(User).filter(User.openid == openid).one()
            if type == 'srtp':
                response = get_api_return('srtp', user, timeout=17)
                if response['code'] == 200:
                    srtp = response['content']
                    # First element is the overview; upsert it.
                    try:
                        overview = db.query(SRTPO).filter(SRTPO.openid == user.openid).one()
                        overview.total = srtp[0]['total']
                        overview.score = srtp[0]['score']
                    except NoResultFound:
                        db.add(SRTPO(openid=user.openid,
                                     total=srtp[0]['total'],
                                     score=srtp[0]['score']))
                    # Replace all detail rows with the fresh ones.
                    items = db.query(SRTPD).filter(SRTPD.openid == user.openid).all()
                    for item in items:
                        db.delete(item)
                    for item in srtp[1:]:
                        db.add(SRTPD(openid=user.openid,
                                     project=item['project'],
                                     department=item['department'],
                                     date=item['date'],
                                     project_type=item['type'],
                                     total_credit=item['total credit'],
                                     credit=item['credit'],
                                     proportion=item['proportion']))
                    try:
                        db.commit()
                        self.write(TEMPLATE.format(content=u'更新好啦'))
                    except:
                        db.rollback()
                        self.write(TEMPLATE.format(content=u'T T 出了点小问题'))
                else:
                    # Upstream error: show its message verbatim.
                    self.write(TEMPLATE.format(content=response['content']))
            elif type == 'gpa':
                response = get_api_return('gpa', user, timeout=30)
                if response['code'] == 200:
                    gpa = response['content']
                    # First element is the overview; upsert it.
                    try:
                        overview = db.query(GPAO).filter(GPAO.openid == user.openid).one()
                        overview.gpa = gpa[0]['gpa']
                        overview.before_revamp = gpa[0]['gpa without revamp']
                        overview.calc_time = gpa[0]['calculate time']
                    except NoResultFound:
                        db.add(GPAO(openid=user.openid,
                                    gpa=gpa[0]['gpa'],
                                    before_revamp=gpa[0]['gpa without revamp'],
                                    calc_time=gpa[0]['calculate time']))
                    # Replace all detail rows with the fresh ones.
                    items = db.query(GPAD).filter(GPAD.openid == user.openid).all()
                    for item in items:
                        db.delete(item)
                    for item in gpa[1:]:
                        db.add(GPAD(openid=user.openid,
                                    course=item['name'],
                                    credit=item['credit'],
                                    semester=item['semester'],
                                    score=item['score'],
                                    score_type=item['type'],
                                    extra=item['extra']))
                    try:
                        db.commit()
                        self.write(TEMPLATE.format(content=u'更新好啦'))
                    except:
                        db.rollback()
                        self.write(TEMPLATE.format(content=u'T T 出了点小问题'))
                else:
                    self.write(TEMPLATE.format(content=response['content']))
            elif type == 'curriculum':
                response = get_api_return('curriculum', user, timeout=30)
                if response['code'] == 200:
                    courses = db.query(Course).filter(Course.openid == user.openid).all()
                    curriculum = response['content']
                    # Replace the cached timetable wholesale.
                    for course in courses:
                        db.delete(course)
                    for day, items in curriculum.items():
                        for item in items:
                            db.add(Course(openid=user.openid,
                                          course=item[0],
                                          period=item[1],
                                          place=item[2],
                                          day=day))
                    try:
                        db.commit()
                        # Fixed: previously wrote the raw response content here
                        # (debug leftover); now consistent with srtp/gpa.
                        self.write(TEMPLATE.format(content=u'更新好啦'))
                    except:
                        db.rollback()
                        self.write(TEMPLATE.format(content=u'T T 出了点小问题'))
                else:
                    self.write(TEMPLATE.format(content=response['content']))
            else:
                self.write(TEMPLATE.format(content=u'T T 出了点小问题???'))
        except:
            # NOTE(review): bare except hides NoResultFound vs. network errors
            # alike; kept so the handler always answers with a friendly page.
            self.write(TEMPLATE.format(content=u'T T 出了点小问题...'))
        self.finish()

    def on_finish(self):
        # Release the request's DB session once the response is done.
        self.db.close()
| 47.901639 | 93 | 0.422656 |
54ca9e80fb49364d221f881daff8c15fbf96c587
| 1,703 |
py
|
Python
|
Database.py
|
lichtwellenreiter/shotstand
|
5413662b76d5a810b36ca5ae8b84f41fc6645cc8
|
[
"MIT"
] | null | null | null |
Database.py
|
lichtwellenreiter/shotstand
|
5413662b76d5a810b36ca5ae8b84f41fc6645cc8
|
[
"MIT"
] | null | null | null |
Database.py
|
lichtwellenreiter/shotstand
|
5413662b76d5a810b36ca5ae8b84f41fc6645cc8
|
[
"MIT"
] | null | null | null |
import sqlite3
class Database:
    """SQLite-backed store that tracks how many shots each group has drunk.

    Fixes over the original: all queries are parameterized (the original
    interpolated user-supplied group names straight into SQL strings —
    a SQL injection vector) and the debugging ``print`` calls are removed.
    """

    def __init__(self, dbname):
        """Open (or create) *dbname* and ensure the shotmeter table exists."""
        self.conn = sqlite3.connect(dbname)
        self.conn.execute("CREATE TABLE IF NOT EXISTS shotmeter (" \
                          "id INTEGER PRIMARY KEY, " \
                          "groupname TEXT not null , " \
                          "shotcount INTEGER," \
                          "CONSTRAINT groupname_uq UNIQUE (groupname))")

    def add_entry(self, groupname, shots):
        """Add *shots* to *groupname*'s tally, creating the row if needed."""
        cur = self.conn.cursor()
        cur.execute("SELECT shotcount FROM shotmeter WHERE groupname = ? LIMIT 1",
                    (groupname,))
        row = cur.fetchone()
        if row is not None:
            self.conn.execute(
                "UPDATE shotmeter SET shotcount = ? WHERE groupname = ?",
                (row[0] + int(shots), groupname))
        else:
            self.conn.execute(
                "INSERT INTO shotmeter ('groupname', 'shotcount') VALUES (?, ?)",
                (groupname, int(shots)))
        self.conn.commit()

    def get_grounames(self):
        """Return all group names.  (Method name typo kept for callers.)"""
        rows = self.conn.execute("select groupname from shotmeter")
        return [row[0] for row in rows]

    def get_group_shots(self):
        """Return all rows as (id, groupname, shotcount), highest count first."""
        cur = self.conn.cursor()
        cur.execute("select * from shotmeter order by shotcount desc")
        return cur.fetchall()
54f757a167eda4dbab0542fc88c420749882a126
| 1,730 |
py
|
Python
|
src/blockchain/merkle_tree.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/blockchain/merkle_tree.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/blockchain/merkle_tree.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
import hashlib
import logging
from typing import List
class Tree_Node:
    """Node of a Merkle tree holding a (double-)SHA-256 hex digest."""

    def __init__(self, left, right, value: str) -> None:
        self.left: Tree_Node = left
        self.right: Tree_Node = right
        self.value = value

    @staticmethod
    def hash(val: str) -> str:
        """Single SHA-256 of *val* (UTF-8 encoded), as a hex string."""
        digest = hashlib.sha256(val.encode('utf-8'))
        return digest.hexdigest()

    @staticmethod
    def doubleHash(val: str) -> str:
        """SHA-256 applied twice: hash of the hex digest of *val*."""
        first_pass = Tree_Node.hash(val)
        return Tree_Node.hash(first_pass)
class MerkleTree:
    """Merkle tree over a list of string values (double-SHA-256 leaves).

    Fix: the original ``__buildTreeRec`` only terminated for power-of-two
    leaf counts — any odd split eventually recursed on an empty list and
    hit a RecursionError.  Single nodes are now returned directly and
    odd-sized levels duplicate their last node, which leaves power-of-two
    inputs byte-for-byte unchanged.
    """

    def __init__(self, values: List[str]) -> None:
        self.__buildTree(values)

    def __buildTree(self, values: List[str]) -> None:
        """Hash every value into a leaf and build the tree bottom-up."""
        leaves: List[Tree_Node] = [Tree_Node(None, None, Tree_Node.doubleHash(e)) for e in values]
        if len(leaves) % 2 == 1:
            leaves.append(leaves[-1:][0])  # duplicate last elem if odd number of elements
        self.root: Tree_Node = self.__buildTreeRec(leaves)

    def __buildTreeRec(self, nodes: List[Tree_Node]) -> Tree_Node:
        # A single node is its own subtree root; duplicating the last node on
        # odd-sized levels keeps every level pairable (the fix).
        if len(nodes) == 1:
            return nodes[0]
        if len(nodes) % 2 == 1:
            nodes = nodes + nodes[-1:]
        half: int = len(nodes) // 2

        if len(nodes) == 2:
            return Tree_Node(nodes[0], nodes[1],
                             Tree_Node.doubleHash(nodes[0].value + nodes[1].value))

        left: Tree_Node = self.__buildTreeRec(nodes[:half])
        right: Tree_Node = self.__buildTreeRec(nodes[half:])
        value: str = Tree_Node.doubleHash(left.value + right.value)
        return Tree_Node(left, right, value)

    def printTree(self) -> None:
        """Log every node value via pre-order traversal."""
        self.__printTreeRec(self.root)

    def __printTreeRec(self, node) -> None:
        if node is not None:
            logging.info(node.value)
            self.__printTreeRec(node.left)
            self.__printTreeRec(node.right)

    def getRootHash(self) -> str:
        """Return the root hash of the tree."""
        return self.root.value
| 32.037037 | 98 | 0.622543 |
0714b36405a398266b808b94ce602286652e23d9
| 2,087 |
py
|
Python
|
archive/muconvert.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
archive/muconvert.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
archive/muconvert.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python -i
"""
Change the input/output_file_path to select other files
"""
from mumath.treebuilder import treebuilder
import xml.etree.ElementTree as et
input_file_path = "mumath/sample.mu"
output_file_path= "mumath/sample.html"
def mumath(**options):
    """Compile a .mu source file (options['ifile']) into an HTML file
    (options['ofile']) by building an element tree with ``treebuilder`` and
    serializing it with ``writer``.

    Recognized options: ``ifile``, ``ofile``, ``extension``.
    """
    ### LOAD EXTENSIONS
    # NOTE(review): extensions are read but never used below -- TODO confirm
    # whether extension support is still planned.
    extensions = options.get("extension")
    ### PRE-PROCESSING
    NotImplemented  # placeholder: pre-processing not implemented yet
    ### BUILD TREE
    ifile = options.get("ifile")
    with open(ifile, mode="r", encoding="utf-8") as ifile:
        text = ifile.read()
    root = treebuilder(text)
    ### POST-PROCESSING
    NotImplemented  # placeholder: post-processing not implemented yet
    ### PRINT COMPILED TEXT TO FILE
    ofile = options.get("ofile")
    with open(ofile, mode="w", encoding="utf-8") as ofile:
        # Emit a fixed HTML skeleton, then the serialized tree.
        ofile.write("""<!DOCTYPE html>""")
        ofile.write("""
<html>
<head>
<title></title>
<meta charset="utf-8">
<body>""")
        writer(ofile, root, 0)
# Tag sets that control HTML serialization in writer().
INLINE = {}  # tags written without a leading newline/indent (empty here)
OPTIONAL = {}  # NOTE(review): defined but unused in this chunk -- TODO confirm purpose
SELFCLOSE = {"mprescripts", "none"}  # tags emitted as self-closing (XML style)
# sample writer
def writer(file, tree, level):
    """Recursively serialize ElementTree node *tree* to *file* as indented
    HTML/XML, indenting with one tab per *level*.

    Special cases visible in the code: et.Comment nodes become <!-- ... -->,
    SELFCLOSE tags are emitted as <tag/>, and <pre> subtrees keep their text
    verbatim (children of <pre> recurse with level=-100 to suppress indent).
    """
    if tree.tag not in INLINE: file.write("\n"+level*"\t")
    if tree.tag is et.Comment:
        file.write(f"<!--")
    else:
        # Opening tag with its attributes.
        file.write(f"<{tree.tag}")
        for attribute, value in tree.attrib.items():
            file.write(f' {attribute}="{value}"')
        if tree.tag in SELFCLOSE:
            file.write("/>") #xml
        else:
            file.write(">")
    #content
    if tree.text:
        lines = tree.text.splitlines()
        file.write(lines[0])
        if tree.tag != "pre":
            # Re-indent continuation lines to the current level.
            for line in lines[1:]:
                file.write("\n" + level*"\t" + line)
        else:
            # <pre>: preserve the text without adding indentation.
            for line in lines[1:]:
                print(line)
                file.write("\n" + line)
    #subtree
    if tree:
        for subtree in tree:
            if tree.tag != "pre": writer(file, subtree, level+1)
            else: writer(file, subtree, -100)
    #closing tag
    if tree.tag not in INLINE: file.write("\n" + level*"\t")
    if tree.tag not in SELFCLOSE:
        if tree.tag is et.Comment:
            file.write("-->")
        else:
            file.write(f"</{tree.tag}>")
    #tail
    if tree.tail:
        lines = tree.tail.splitlines()
        file.write(lines[0])
        for line in lines[1:]:
            file.write("\n" + level*"\t" + line)
# Script entry point: compile the sample .mu file to HTML using the
# module-level input/output paths.
if __name__ == "__main__":
    mumath(
        ifile = input_file_path,
        ofile = output_file_path,
        extension = [],
    )
| 20.460784 | 57 | 0.641591 |
4ad86ff106a5b20030f4c1493db9a52d1b48aad9
| 223 |
py
|
Python
|
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | 1 |
2019-04-07T23:14:29.000Z
|
2019-04-07T23:14:29.000Z
|
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | null | null | null |
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | null | null | null |
class Car:
    """A simple car described by its color and model."""

    def __init__(self, color, model):
        self.color = color
        self.model = model

    def print(self):
        # NOTE: the method name shadows the builtin `print`; kept for
        # compatibility with existing callers.
        description = self.color + " " + self.model
        return description


car1 = Car("green", "Toyota")
print(car1.print())
| 18.583333 | 44 | 0.578475 |
ab302693c3279ea347f27f6fb202308aab25a2d3
| 7,565 |
py
|
Python
|
AI_Engine_Development/Feature_Tutorials/12-axis-traffic-generator/sw/pysrc/run_traffic_gen.py
|
jlamperez/Vitis-Tutorials
|
9a5b611caabb5656bbb2879116e032227b164bfd
|
[
"Apache-2.0"
] | 567 |
2019-10-01T16:31:26.000Z
|
2022-03-31T18:43:30.000Z
|
AI_Engine_Development/Feature_Tutorials/12-axis-traffic-generator/sw/pysrc/run_traffic_gen.py
|
jlamperez/Vitis-Tutorials
|
9a5b611caabb5656bbb2879116e032227b164bfd
|
[
"Apache-2.0"
] | 142 |
2019-11-25T14:42:16.000Z
|
2022-03-31T15:06:56.000Z
|
AI_Engine_Development/Feature_Tutorials/12-axis-traffic-generator/sw/pysrc/run_traffic_gen.py
|
jlamperez/Vitis-Tutorials
|
9a5b611caabb5656bbb2879116e032227b164bfd
|
[
"Apache-2.0"
] | 387 |
2019-10-10T09:14:00.000Z
|
2022-03-31T02:51:02.000Z
|
#
#© Copyright 2021 Xilinx, Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
from xilinx_xtlm import ipc_axis_master_util
from xilinx_xtlm import ipc_axis_slave_util
from xilinx_xtlm import xtlm_ipc
import struct
import multiprocessing as mp
import numpy as np
import copy as copy
import matplotlib.pyplot as plt
import sys
class IQData():
    """Sends complex IQ samples to an AIE simulation over AXI-stream IPC and
    receives/plots the results.

    Supported sample formats: 'cint16' (int16 I/Q pairs) and 'cfloat'
    (float32 I/Q pairs). `plio_width` is the stream width in bits; each AXI
    beat carries plio_width//8 bytes.
    """
    def __init__(self, numpy_cplx_data,aietype='cint16',plio_width=32,supressplots=0):
        """ Initialization """
        self.input_cplx_data = numpy_cplx_data
        # Pipe used to pass the received vector from the rx worker back here.
        self.parent_conn0, self.child_conn0 = mp.Pipe()
        self.aietype = aietype
        self.plio_width = plio_width
        self.rx_payload_len = -1
        self.tx_payload_len = -1
        self.supressplots = supressplots
    def self_test(self):
        # Round-trips a counting pattern through the byte converters.
        print("inside self_test")
        self.input_cplx_data = MakeCountingPattern(16)
        #self.plot_results(self.input_cplx_data,iqdata)
        t1 = self.convert_numpy_to_bytes()
        self.convert_bytes_to_numpy(t1)
    def rx_from_aie(self):
        """Block until the AIE produces data, decode it, and send it through the pipe."""
        payload = self.rx_axis.sample_transaction()
        #This call blocks until the AIE passes some data to the AXI SIM IPC SLAVE
        cvec = self.convert_bytes_to_numpy(payload.data)
        self.child_conn0.send(cvec)
        print("Received AIE Output. Sending to parent thread for processing")
    def tx_to_aie(self,iq_data_as_bytes,test):
        """Split the byte buffer into plio_width-sized beats and stream them
        to the AIE, asserting TLAST on the final beat.

        NOTE(review): the `test` parameter is unused -- TODO confirm.
        """
        NumBytesToSend = len(iq_data_as_bytes)
        #print("xmit: len Bytes = %d" % NumBytesToSend)
        NumBytesPerBeat = self.plio_width//8
        NumTrans = NumBytesToSend//NumBytesPerBeat
        print("NumBytesToSend=%d,NumBytesPerTransaction=%d,TotalTransactions=%d" % (NumBytesToSend,NumBytesPerBeat,NumTrans))
        for i in range(NumTrans):
            data2send = iq_data_as_bytes[(i*NumBytesPerBeat):(i*NumBytesPerBeat)+NumBytesPerBeat]
            #Stride through byte array in steps of BytesPerBeat
            payload = xtlm_ipc.axi_stream_packet()
            #Create a axi stream packet object
            payload.data_length = NumBytesPerBeat
            #Tell the object how much data will be sent in bytes
            if(i == NumTrans-1):
                payload.tlast = True
                print("Tlast sent!")
            else:
                payload.tlast = False
            payload.data =data2send
            self.tx_axis.b_transport(payload)
            #Send the data to the ipc master
        print("Finished sending")
    def run_test(self, ipc=False):
        """Drive the full flow: connect IPC, transmit, receive, plot, shut down."""
        if ipc:
            self.tx_axis = ipc_axis_master_util("tx_iqdata")
            self.rx_axis = ipc_axis_slave_util("rx_iqdata")
            #Create both Master and Slave ipc utils.
            #The argument strings must match the names in system.cfg
        self.tx_to_aie(self.convert_numpy_to_bytes(),False)
        print("Data sent to AIE. Waiting for results...this may take a few minutes")
        if ipc:
            # NOTE(review): target=self.rx_from_aie() *calls* the method in
            # this process and passes its return value (None) as the target;
            # the receive therefore happens synchronously here, not in the
            # child process. Probably intended: target=self.rx_from_aie.
            p= mp.Process(target=self.rx_from_aie())
            p.start()
            aie_output = self.parent_conn0.recv()
            print("Data received from AIE ")
            p.join()
        if (not self.supressplots):
            self.plot_results(self.input_cplx_data,aie_output)
            input("Enter any key to end simulation")
        self.rx_axis.disconnect()
        self.tx_axis.end_of_simulation()
        print("Disconnected all IPC handles.. done!")
    def convert_numpy_to_bytes(self):
        """Pack self.input_cplx_data into interleaved little-endian [re, im]
        pairs (int16 for 'cint16', float32 for 'cfloat') and return bytes."""
        L = len(self.input_cplx_data)
        data = self.input_cplx_data
        if(self.aietype == "cint16"):
            rVec = np.real(data).astype(np.int16)
            iVec = np.imag(data).astype(np.int16)
            out2column = np.zeros((L,2)).astype(np.int16)
        elif(self.aietype =='cfloat'):
            print("cfloat!")
            rVec = np.real(data)
            iVec = np.imag(data)
            out2column = np.zeros((L,2)).astype(np.single)
        else:
            # NOTE(review): falls through with out2column undefined for
            # unknown aietype -- a NameError follows. TODO: raise instead.
            print("Not supported type!")
        out2column[:,0] = rVec
        out2column[:,1] = iVec
        #print("Byte array to send")
        #print(''.join(r'\x'+hex(letter)[2:] for letter in out2column.tobytes()))
        #print("outlen=")
        return out2column.tobytes()
    def convert_bytes_to_numpy(self,byte_arry):
        """Unpack little-endian interleaved pairs back into a complex vector.

        NOTE(review): even indices are treated as imaginary and odd as real
        (the reverse of convert_numpy_to_bytes) -- TODO confirm this matches
        the AIE kernel's output ordering.
        """
        if(self.aietype == "cint16"):
            formatString = "<"+str(len(byte_arry)//2)+"h"
            upack = struct.unpack(formatString, byte_arry)
            ivec = upack[0:len(upack):2]
            rvec = upack[1:len(upack):2]
        elif(self.aietype =='cfloat'):
            formatString = "<"+str(len(byte_arry)//4)+"f"
            upack = struct.unpack(formatString, byte_arry)
            print("Len Rx Array=")
            print(len(byte_arry))
            ivec = upack[0:len(upack):2]
            rvec = upack[1:len(upack):2]
        else:
            print("Not supported type!")
        cVec = np.array(rvec) + 1j*np.array(ivec)
        return cVec
    def plot_results(self,aie_in,aie_out,style='t'):
        """Plot the magnitude of a golden numpy FFT of aie_in against the
        (fftshifted) AIE output for visual comparison."""
        ##AIE IN
        # plt.plot( list(range(0,len(aie_in))) ,np.real(aie_in),label ="aie_in R")
        # plt.plot( list(range(0,len(aie_in))) ,np.imag(aie_in),label ="aie_in I")
        #Perform Golden Operation on AIE IN to generate Golden/reference output
        golden_iq_out = np.fft.fftshift(np.fft.fft(aie_in))
        golden_iq_out = golden_iq_out/4 #DSPLIB FFT HAS OUTPUT = MATLAB*4. Compensate for this.
        aie_out_shft = np.fft.fftshift(aie_out)
        plt.plot( list(range(0,len(golden_iq_out))),np.abs(golden_iq_out),label ="Golden FFT - MAG",marker="+")
        plt.plot( list(range(0,len(aie_out))) ,np.abs(aie_out_shft),label ="AIE OUT - MAG")
        plt.legend()
        plt.show()
def MakeInputStim(Nsamps=1024):
    """Generate a complex test tone (1.92 MHz at Fs=245.76 MHz), scaled and
    rounded to a signed 10-bit integer range."""
    sample_rate = 245.76e6
    tone_freq = 30.72e6 / 16
    idx = np.arange(0, Nsamps)
    tone = 1.0 * np.exp(-2j * np.pi * tone_freq / sample_rate * idx)
    # 16-bit samples minus 4 guard bits, minus 2 more bits reserved as
    # headroom for FFT processing gain.
    nbits = (16 - 4) - 2
    peak = np.max(np.abs(tone))
    return np.round(tone / peak * 2 ** (nbits - 1) - 1)
def MakeCountingPattern(Nsamps=16):
    """Return [0, 1-1j, 2-2j, ...]: counting real parts with negated-count
    imaginary parts. Useful for eyeballing byte ordering end to end."""
    counts = np.arange(0, Nsamps)
    return counts + 1j * (-1 * counts)
if __name__ == "__main__":
    # Fix: the original raised NameError when run without both command-line
    # arguments; initialize defaults first (matching IQData's own defaults).
    cmd_line_pliowidth = 32  # PLIO stream width in bits -- presumably 32/64/128; confirm against system.cfg
    skipplots = 0            # non-zero suppresses matplotlib plots
    for i, arg in enumerate(sys.argv):
        if( i == 1):
            cmd_line_pliowidth = int(arg)
        if( i == 2):
            skipplots=int(arg)
            print(skipplots)
    NSamps=128
    iqdata =MakeInputStim(NSamps)
    #iqdata = MakeCountingPattern(NSamps)
    obj = IQData(iqdata,aietype="cint16",plio_width=cmd_line_pliowidth,supressplots=skipplots)
    obj.run_test(ipc=True)
    print("TEST PASSED")
| 32.191489 | 125 | 0.598414 |
dbabd978df29e9e193fa4b56bc9f21649f73fe4e
| 464 |
py
|
Python
|
leetcode/011-Container-With-Most-Water/ContainerWMW_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-12-29T03:26:39.000Z
|
2016-12-29T03:26:39.000Z
|
leetcode/011-Container-With-Most-Water/ContainerWMW_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/011-Container-With-Most-Water/ContainerWMW_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
# @ cchen
# based on the lecture of 'Two Pointers' in Advanced Programming Class
class Solution:
    # @param {integer[]} height
    # @return {integer}
    def maxArea(self, height):
        """Return the largest water area between two walls (two-pointer scan).

        Bug fix: the original advanced a pointer *before* measuring, so the
        initial (widest) container was never evaluated -- e.g. [1, 1]
        returned 0 instead of 1. Measure first, then move the shorter wall.
        """
        i, j = 0, len(height) - 1
        res = 0
        while i < j:
            # Area of the current container: limited by the shorter wall.
            res = max(res, min(height[i], height[j]) * (j - i))
            if height[i] < height[j]:
                i += 1
            else:
                j -= 1
        return res
| 24.421053 | 70 | 0.465517 |
dbdafdf0fa98af2e958d720b4530afcc3062cfb7
| 5,400 |
py
|
Python
|
pymongo/member.py
|
dxmahata/TwitterSentimentAnalysis
|
b228747dfa74266853d4f8e6d4bc4eb29c080dd3
|
[
"MIT"
] | 12 |
2017-12-15T06:50:16.000Z
|
2020-02-02T12:22:09.000Z
|
pymongo/member.py
|
dxmahata/TwitterSentimentAnalysis
|
b228747dfa74266853d4f8e6d4bc4eb29c080dd3
|
[
"MIT"
] | 1 |
2016-05-09T16:13:48.000Z
|
2016-05-09T16:13:48.000Z
|
pymongo/member.py
|
dxmahata/TwitterSentimentAnalysis
|
b228747dfa74266853d4f8e6d4bc4eb29c080dd3
|
[
"MIT"
] | 4 |
2018-01-16T01:26:22.000Z
|
2019-04-30T13:42:38.000Z
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent a mongod / mongos instance"""
from pymongo import common
from pymongo.errors import ConfigurationError
from pymongo.read_preferences import ReadPreference
# Member states, derived from the server's ismaster response in Member.__init__.
PRIMARY = 1    # ismaster_response['ismaster'] is true
SECONDARY = 2  # ismaster_response['secondary'] is true
ARBITER = 3    # ismaster_response['arbiterOnly'] is true
OTHER = 4      # none of the above (e.g. recovering/startup)
# TODO: rename 'Server' or 'ServerDescription'.
class Member(object):
    """Immutable representation of one server.

    :Parameters:
      - `host`: A (host, port) pair
      - `connection_pool`: A Pool instance
      - `ismaster_response`: A dict, MongoDB's ismaster response
      - `ping_time`: A MovingAverage instance
    """
    # For unittesting only. Use under no circumstances!
    _host_to_ping_time = {}
    def __init__(self, host, connection_pool, ismaster_response, ping_time):
        self.host = host
        self.pool = connection_pool
        self.ismaster_response = ismaster_response
        self.ping_time = ping_time
        # mongos routers identify themselves with msg == 'isdbgrid'.
        self.is_mongos = (ismaster_response.get('msg') == 'isdbgrid')
        # Classify the member's replica-set role from the ismaster response.
        if ismaster_response['ismaster']:
            self.state = PRIMARY
        elif ismaster_response.get('secondary'):
            self.state = SECONDARY
        elif ismaster_response.get('arbiterOnly'):
            self.state = ARBITER
        else:
            self.state = OTHER
        self.set_name = ismaster_response.get('setName')
        self.tags = ismaster_response.get('tags', {})
        # Size/wire-protocol limits, falling back to the library defaults
        # when the server predates the corresponding ismaster fields.
        self.max_bson_size = ismaster_response.get(
            'maxBsonObjectSize', common.MAX_BSON_SIZE)
        self.max_message_size = ismaster_response.get(
            'maxMessageSizeBytes', 2 * self.max_bson_size)
        self.min_wire_version = ismaster_response.get(
            'minWireVersion', common.MIN_WIRE_VERSION)
        self.max_wire_version = ismaster_response.get(
            'maxWireVersion', common.MAX_WIRE_VERSION)
        self.max_write_batch_size = ismaster_response.get(
            'maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
        # self.min/max_wire_version is the server's wire protocol.
        # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports.
        if (
            # Server too new.
            common.MAX_SUPPORTED_WIRE_VERSION < self.min_wire_version
            # Server too old.
            or common.MIN_SUPPORTED_WIRE_VERSION > self.max_wire_version
        ):
            raise ConfigurationError(
                "Server at %s:%d uses wire protocol versions %d through %d, "
                "but PyMongo only supports %d through %d"
                % (self.host[0], self.host[1],
                   self.min_wire_version, self.max_wire_version,
                   common.MIN_SUPPORTED_WIRE_VERSION,
                   common.MAX_SUPPORTED_WIRE_VERSION))
    def clone_with(self, ismaster_response, ping_time_sample):
        """Get a clone updated with ismaster response and a single ping time.
        """
        ping_time = self.ping_time.clone_with(ping_time_sample)
        return Member(self.host, self.pool, ismaster_response, ping_time)
    @property
    def is_primary(self):
        # True when this member is the writable primary.
        return self.state == PRIMARY
    @property
    def is_secondary(self):
        # True when this member is a readable secondary.
        return self.state == SECONDARY
    @property
    def is_arbiter(self):
        # True when this member only votes in elections.
        return self.state == ARBITER
    def get_avg_ping_time(self):
        """Get a moving average of this member's ping times.
        """
        if self.host in Member._host_to_ping_time:
            # Simulate ping times for unittesting
            return Member._host_to_ping_time[self.host]
        return self.ping_time.get()
    def matches_mode(self, mode):
        """Return True if this member can serve reads for *mode*
        (a ReadPreference constant)."""
        assert not self.is_mongos, \
            "Tried to match read preference mode on a mongos Member"
        if mode == ReadPreference.PRIMARY and not self.is_primary:
            return False
        if mode == ReadPreference.SECONDARY and not self.is_secondary:
            return False
        # If we're not primary or secondary, then we're in a state like
        # RECOVERING and we don't match any mode
        return self.is_primary or self.is_secondary
    def matches_tags(self, tags):
        """Return True if this member's tags are a superset of the passed-in
        tags. E.g., if this member is tagged {'dc': 'ny', 'rack': '1'},
        then it matches {'dc': 'ny'}.
        """
        for key, value in tags.items():
            if key not in self.tags or self.tags[key] != value:
                return False
        return True
    def matches_tag_sets(self, tag_sets):
        """Return True if this member matches any of the tag sets, e.g.
        [{'dc': 'ny'}, {'dc': 'la'}, {}]
        """
        for tags in tag_sets:
            if self.matches_tags(tags):
                return True
        return False
    def __str__(self):
        return '<Member "%s:%s" primary=%r>' % (
            self.host[0], self.host[1], self.is_primary)
| 35.761589 | 77 | 0.640926 |
918c6dab7ba4e8985d09aa9ca3eb99114ce0c752
| 11,007 |
py
|
Python
|
PowerEyes/install.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 4 |
2021-04-23T15:39:17.000Z
|
2021-12-27T22:53:24.000Z
|
PowerEyes/install.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | null | null | null |
PowerEyes/install.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 2 |
2021-04-19T08:28:54.000Z
|
2022-01-19T13:23:29.000Z
|
#!/usr/bin/python3
#coding:utf-8
import os
import subprocess
import hashlib
import platform
import sys
import time
banner = '''
_/_/_/ _/
_/ _/_/ _/_/_/_/ _/ _/ _/_/_/
_/_/ _/_/_/_/ _/ _/ _/ _/ _/
_/ _/ _/ _/ _/ _/ _/
_/_/_/ _/_/_/ _/_/ _/_/_/ _/_/_/
_/
_/
By Dxvistxr
'''
class tools_path():
    """Filesystem locations probed by debian() to decide whether each
    dependency is already installed."""
    gcc = '/usr/bin/gcc'
    python27 = '/usr/bin/python2'
    python3 = '/usr/bin/python3'
    wine = '/usr/bin/wine'
    tdm_gcc = '/root/.wine/drive_c/TDM-GCC-64'      # TDM-GCC inside the wine prefix
    python27_wine = '/root/.wine/drive_c/Python27'  # Windows Python 2.7 inside wine
    base64 = '/usr/bin/base64'
def sys_required():
    """Exit with a message unless the interpreter is running on Linux."""
    if 'Linux' not in platform.platform():
        sys.exit('[*] Linux System Required !')
def debian():
    """Interactive installer for Debian-family systems: checks for gcc,
    python2/3, wine, TDM-GCC (in wine), Python 2.7 (in wine) and coreutils,
    prompting to install whatever is missing via apt/wget/wine.
    """
    sys_required()
    os.system('clear')
    os.system('clear')
    print('\033[1;96m'+banner)
    print('\n')
    # NOTE(review): subprocess.Popen returns immediately without waiting, so
    # these three "update" steps run in the background and may still be
    # holding the apt lock when later installs start -- consider
    # subprocess.run / .wait().
    print('\033[1;96m[*] \033[1;92m Updating Package....')
    subprocess.Popen('apt update', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    print('\033[1;96m[*] \033[1;92m Upgrading Package....')
    subprocess.Popen('apt upgrade -y', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    print('\033[1;96m[*] \033[1;92m Autoremoving Package....')
    subprocess.Popen('apt autoremove -y', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    print('\033[1;96m[*] \033[1;92m[*] Checking Tools.....')
    time.sleep(0.5)
    # --- GCC ---
    print('\033[1;96m[*] \033[1;92m[*] Check GCC')
    check_gcc = os.path.exists(tools_path.gcc)
    if check_gcc ==True:
        print('\033[1;92m[*] GCC FOUND !')
    else:
        print('\033[1;91m[*] GCC NOT FOUND !')
        install_gcc = str(input('\033[1;92m[*] Do You Want Install (yes/no) > '))
        if install_gcc =='yes':
            os.system('apt update && apt upgrade -y && apt install gcc -y')
        elif install_gcc =='no':
            sys.exit('\033[1;91m[*] GCC NOT INSTALLED EXITING...')
    # --- Python 2.7 ---
    checkpython27 = os.path.exists(tools_path.python27)
    if checkpython27 ==True:
        print('\033[1;92m[*] PYTHON2.7 FOUND !')
    else:
        print('\033[1;91m[*] PYTHON27 NOT FOUND !')
        install_python27 = str(input('\033[1;92m[*] Do You Want Install (yes/no) > '))
        if install_python27 =='yes':
            os.system('apt update && apt upgrade -y && apt install python2 -y')
        elif install_python27 =='no':
            sys.exit('\033[1;91m[*] PYTHON27 NOT INSTALLED EXITING')
    # --- Python 3 ---
    checkpython3 = os.path.exists(tools_path.python3)
    if checkpython3 ==True:
        print('\033[1;92m[*] PYTHON3 FOUND !')
    else:
        print('\033[1;91m[*] PYTHON3 NOT FOUND !')
        install_python3 = str(input('\033[1;92m[*] Do You Want Install (yes/no) > '))
        if install_python3 =='yes':
            print('\033[1;92m[*] Installing Python3....')
            os.system('apt update && apt upgrade -y && apt install python3 -y')
        elif install_python3 =='no':
            sys.exit('\033[1;91m[*] PYTHON3 NOT FOUND !')
    # --- Wine (several installation paths, chosen interactively) ---
    check_wine = os.path.exists(tools_path.wine)
    if check_wine ==True:
        print('\033[1;92m[*] WINE FOUND !')
    else:
        print('\033[1;91m[*] WINE NOT FOUND !')
        installing_wine = str(input('\033[1;92m[*] Do You Want Install wine (yes/no) > '))
        if installing_wine =='yes':
            print('\033[1;92m[\033[1;93m1\033[1;92m] Install Wine With APT')
            print('\033[1;92m[\033[1;93m2\033[1;92m] Install Wine With WGET (DEBIAN)')
            print('\033[1;92m[\033[1;93m3\033[1;92m] Install Wine With WGET (UBUNTU & LINUX MINT)')
            print('\033[1;92m[\033[1;93m4\033[1;92m] Exiting Setup')
            choice_install_wine = int(input('\033[1;92m[*] Select Installation Method For Wine : '))
            if choice_install_wine ==1:
                print('\033[1;92m[*] Installing Wine...')
                os.system('apt update && apt upgrade -y && apt install wine wine-stable -y')
            elif choice_install_wine ==2:
                # WineHQ repository for Debian: add key, then write the
                # sources.list.d entry matching the chosen release.
                os.system('apt update && apt upgrade -y && wget -nc https://dl.winehq.org/wine-builds/winehq.key')
                os.system('sudo apt-key add winehq.key')
                print('\033[1;92m[\033[1;93m1\033[1;92m] Debian8 (Jessie)')
                print('\033[1;92m[\033[1;93m2\033[1;92m] Debian9 (Stretch)')
                print('\033[1;92m[\033[1;93m3\033[1;92m] Debian10 (Currently Testing) (Buster)')
                print('\n')
                choice_install_packet_wine = int(input('\033[1;92m[*] Choice Wine Packet : '))
                if choice_install_packet_wine ==1:
                    print('\033[1;92m[*] Creating winehq.list....')
                    print('\033[1;92m[*] Writing => \033[1;91mdeb \033[1;93mhttps://dl.winehq.org/wine-builds/debian/ \033[1;95mjessie \033[1;91mmain')
                    print('\033[1;92m[*] Wait Moment Please...')
                    check_apt = os.path.exists('/etc/apt/sources.list.d')
                    if check_apt ==True:
                        f=open('/etc/apt/sources.list.d/winehq.list','w')
                        f.write('deb https://dl.winehq.org/wine-builds/debian/ jessie main')
                        f.close()
                        print('\033[1;92m[*] Updating Packet....')
                        os.system('apt update')
                        os.system('apt install --install-recommends winehq-stable')
                    else:
                        print('\033[1;91m[*] ETC/APT/SOURCES.LIST.D NOT FOUND !')
                        sys.exit()
                elif choice_install_packet_wine ==2:
                    print('\033[1;92m[*] Creating winehq.list....')
                    print('\033[1;92m[*] Writing => \033[1;91mdeb \033[1;93mhttps://dl.winehq.org/wine-builds/debian/ \033[1;95mstretch \033[1;91mmain')
                    print('\033[1;92m[*] Wait Moment Please....')
                    check_apt = os.path.exists('/etc/apt/sources.list.d')
                    if check_apt ==True:
                        f=open('/etc/apt/sources.list.d/winehq.list','w')
                        f.write('deb https://dl.winehq.org/wine-builds/debian/ stretch main')
                        f.close()
                        print('\033[1;92m[*] Updating Packet.....')
                        os.system('apt update')
                        os.system('apt install --install-recommends winehq-stable -y')
                elif choice_install_packet_wine ==3:
                    print('\033[1;92m[*] Creating winehq.list....')
                    print('\033[1;92m[*] Writing => deb https://dl.winehq.org/wine-builds/debian/ buster main')
                    check_apt = os.path.exists('/etc/apt/sources.list.d')
                    if check_apt ==True:
                        f=open('/etc/apt/sources.list.d/winehq.list','w')
                        f.write('deb https://dl.winehq.org/wine-builds/debian/ buster main')
                        f.close()
                        print('\033[1;92m[*] Updating Packet....')
                        os.system('apt update')
                        os.system('apt install --install-recommends winehq-stable -y')
                elif choice_install_packet_wine ==4:
                    print('\033[1;92m[*] exiting....')
                    sys.exit()
            elif choice_install_wine ==3:
                # WineHQ repository for Ubuntu / Linux Mint releases.
                os.system('apt update && apt upgrade -y && apt install wget -y && wget -nc https://dl.winehq.org/wine-builds/winehq.key')
                os.system('sudo apt-key add winehq.key')
                print('\033[1;92m[\033[1;93m1\033[1;92m] Ubuntu 18.10')
                print('\033[1;92m[\033[1;93m2\033[1;92m] Ubuntu 18.04 & Linux Mint 19.x')
                print('\033[1;92m[\033[1;93m3\033[1;92m] Ubuntu 16.04 & Linux Mint 18.x')
                print('\033[1;92m[\033[1;93m4\033[1;92m] Ubuntu 14.04 & Linux Mint 17.x')
                choice_ubuntu_install_wine = int(input('\033[1;92m[*] Choice Packet Wine => '))
                if choice_ubuntu_install_wine ==1:
                    print('\033[1;92m[*] Installing Wine Sources List..')
                    os.system("sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ cosmic main'")
                    os.system('apt update && sudo apt install --install-recommends winehq-stable')
                elif choice_ubuntu_install_wine ==2:
                    print('\033[1;92m[*] Installing Wine Sources List...')
                    os.system("sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ bionic main'")
                    os.system('apt update && sudo apt install --install-recommends winehq-stable')
                elif choice_ubuntu_install_wine ==3:
                    print('\033[1;92m[*] Installing Wine Sources List....')
                    os.system("sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ xenial main'")
                    os.system('apt update && sudo apt install --install-recommends winehq-stable')
                elif choice_ubuntu_install_wine ==4:
                    print('\033[1;92m[*] Installing Wine Sources List...')
                    os.system("sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ trusty main'")
                    os.system('apt update && sudo apt install --install-recommends winehq-stable')
    # --- TDM-GCC inside wine ---
    check_tdm_gcc = os.path.exists(tools_path.tdm_gcc)
    if check_tdm_gcc ==True:
        print('\033[1;92m[*] TDM-GCC FOUND !!!')
    else:
        print('\033[1;91m[*] TDM GCC NOT FOUND !!!')
        install_gcc_wine = str(input('\033[1;92m[*] Do You Want Install TDM-GCC (yes/no) > '))
        if install_gcc_wine =='yes':
            # NOTE(review): downloads an installer from a hard-coded
            # mediafire URL over plain HTTP -- likely dead and unverifiable.
            os.system('apt update && apt install wget -y && wget http://download1645.mediafire.com/mzwfmpmvxfdg/dnhcpavttyy5xwt/tdm.exe')
            os.system('wine tdm.exe')
        elif install_gcc_wine =='no':
            sys.exit('\033[1;91m[*] TDM GCC NOT FOUND !')
    # --- Python 2.7 inside wine ---
    check_python27_wine = os.path.exists(tools_path.python27_wine)
    if check_python27_wine ==True:
        print('\033[1;92m[*] PYTHON27 WINE FOUND !')
    else:
        print('\033[1;91m[*] PYTHON27 NOT FOUND !')
        install_python27 = str(input('\033[1;92m[\033[1;93m*\033[1;92m] Do You Want Install Python2.7 Wine (yes/no) > '))
        if install_python27 =='yes':
            os.system('apt update && apt install wget -y && wget https://www.python.org/ftp/python/2.7.15/python-2.7.15.msi && mv python-2.7.15.msi python27.msi && wine /i msiexec python27.msi')
        elif install_python27 =='no':
            sys.exit('\033[1;96m[*] Python27 Not Found ! wine')
    # --- coreutils / base64 ---
    check_base64 = os.path.exists(tools_path.base64)
    if check_base64 ==True:
        print('\033[1;92m[*] Base64 Found !')
    else:
        print('\033[1;91m[*] Base64 Not Found !')
        install_coreutils = str(input('\033[1;92m[*] Do You Want Install Coreutils/Base64 (yes/no) > '))
        if install_coreutils =='yes':
            os.system('apt update && apt upgrade -y && apt install coreutils -y')
        elif install_coreutils =='no':
            sys.exit()
# Module-level entry point: verify the OS then run the interactive installer.
# NOTE(review): runs on import as well -- consider an `if __name__ == "__main__":` guard.
sys_required()
debian()
| 48.065502 | 191 | 0.564368 |
f465b4654dc71f8a3174d6662633dcf4da13dd5c
| 2,156 |
py
|
Python
|
books/PythonCleanCode/ch3_good_code/test_exceptions_2.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch3_good_code/test_exceptions_2.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch3_good_code/test_exceptions_2.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""Clean Code in Python - Chapter 3: General Traits of Good Code
"""
import unittest
from unittest.mock import Mock, patch
from exceptions_2 import DataTransport, Event
class FailsAfterNTimes:
    """Test double for a connector: ``connect`` raises *with_exception* on
    each of the first *n_times* calls and returns the object itself after
    that; ``send`` echoes the payload back."""

    def __init__(self, n_times: int, with_exception) -> None:
        self._remaining_failures = n_times
        self._exception = with_exception

    def connect(self):
        self._remaining_failures -= 1
        if self._remaining_failures >= 0:
            raise self._exception
        return self

    def send(self, data):
        # Pretend the transmission succeeded by returning the data unchanged.
        return data
# Class-level patch: every test method receives the time.sleep mock as its
# extra `sleep` argument, so retry back-off is instantaneous.
@patch("time.sleep", return_value=0)
class TestTransport(unittest.TestCase):
    def setUp(self):
        # Silence the module's error logger for the duration of each test.
        self.error_log = patch("exceptions_2.logger.error")
        self.error_log.start()
    def tearDown(self):
        self.error_log.stop()
    def test_connects_after_retries(self, sleep):
        """A connector failing twice still delivers; sleep is called once per retry."""
        data_transport = DataTransport(
            FailsAfterNTimes(2, with_exception=ConnectionError)
        )
        data_transport.send = Mock()
        event = Event("test")
        data_transport.deliver_event(event)
        assert sleep.call_count == DataTransport.retry_n_times - 1, sleep.call_count
    def test_connects_directly(self, sleep):
        """A healthy connector delivers on the first attempt with no back-off."""
        connector = Mock()
        data_transport = DataTransport(connector)
        data_transport.send = Mock()
        data_transport.deliver_event(Event("test"))
        connector.connect.assert_called_once()
        assert sleep.call_count == 0
    def test_connection_error(self, sleep):
        """Persistent connection failure surfaces as ConnectionError after all retries."""
        data_transport = DataTransport(Mock(connect=Mock(side_effect=ConnectionError)))
        self.assertRaisesRegex(
            ConnectionError,
            r"Couldn't connect after \d+ times",
            data_transport.deliver_event,
            Event("connection error"),
        )
        assert sleep.call_count == DataTransport.retry_n_times
    def test_error_in_event(self, sleep):
        """An error while decoding the event propagates without any retry."""
        data_transport = DataTransport(Mock())
        event = Mock(decode=Mock(side_effect=ValueError))
        self.assertRaises(ValueError, data_transport.deliver_event, event)
        assert not sleep.called
# Allow running this test module directly; unittest/pytest discovery also works.
if __name__ == "__main__":
    unittest.main()
| 29.135135 | 87 | 0.669759 |
f479821ebb37561740faa9f1e056f7615c521ac7
| 2,608 |
py
|
Python
|
Kaggle-Competitions/CrowdFlower/scripts/model_train_plus_test.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
Kaggle-Competitions/CrowdFlower/scripts/model_train_plus_test.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
Kaggle-Competitions/CrowdFlower/scripts/model_train_plus_test.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import SelectPercentile, chi2, RFECV
from sklearn.linear_model import LogisticRegression
def TFIDF(Xtrain, Xwhole):
    """Fit a word n-gram (1-3) TF-IDF vectorizer and transform Xtrain.

    Xtrain: training documents to transform.
    Xwhole: optional full corpus to fit the vocabulary on; when None the
        vectorizer is fitted on Xtrain itself.
    Returns (transformed_Xtrain, fitted_vectorizer).
    """
    tfv = TfidfVectorizer(min_df=3, max_df=700, max_features=None,
            strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
            ngram_range=(1, 3), use_idf=True, smooth_idf=True, sublinear_tf=True, stop_words = 'english')
    # Fix: compare against None with `is`, not `==` (PEP 8; `==` may invoke
    # arbitrary __eq__ on array-like inputs).
    if Xwhole is None:
        return (tfv.fit_transform(Xtrain), tfv)
    else:
        tfv.fit(Xwhole)
        return (tfv.transform(Xtrain), tfv)
def build_non_linear_model(Xtrain, y):
    """Reduce Xtrain to 200 SVD components, standardize, then fit an RBF SVM.

    Returns (fitted_classifier, fitted_svd, fitted_scaler) so callers can
    apply the identical transforms at prediction time.
    """
    reducer = TruncatedSVD(n_components=200, algorithm='randomized',
                           n_iter=5, random_state=None, tol=0.0)
    scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
    classifier = SVC(C=10.0, kernel='rbf', degree=3,
                     gamma=0.0, coef0=0.0, shrinking=True, probability=False,
                     tol=0.001, cache_size=200, class_weight=None,
                     verbose=False, max_iter=-1, random_state=None)
    reduced = reducer.fit_transform(Xtrain)
    standardized = scaler.fit_transform(reduced)
    return (classifier.fit(standardized, y), reducer, scaler)
def build_linear_model(X, y):
    """Keep the top 20% of features by chi2 score, then fit a linear SVM.

    Returns (fitted_classifier, fitted_selector).
    """
    selector = SelectPercentile(score_func=chi2, percentile=20)
    reduced = selector.fit_transform(X, y)
    model = SVC(C=10.0, kernel='linear', probability=True)
    return (model.fit(reduced, y), selector)
def build_knn_model(Xtrain, y):
    """Reduce Xtrain to 100 SVD components, standardize, then fit a
    distance-weighted 5-nearest-neighbours classifier.

    Returns (fitted_knn, fitted_svd, fitted_scaler).
    """
    reducer = TruncatedSVD(n_components=100, algorithm='randomized',
                           n_iter=5, random_state=None, tol=0.0)
    scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
    features = scaler.fit_transform(reducer.fit_transform(Xtrain))
    knn = KNeighborsClassifier(n_neighbors=5, weights='distance', algorithm='brute')
    return (knn.fit(features, y), reducer, scaler)
def feature_selection(X, y, pipeline, step=1, cv=None, scoring=None, estimator_params=None, verbose=0):
    """Run cross-validated recursive feature elimination over *pipeline*
    and return the fitted RFECV selector.

    NOTE(review): `estimator_params` was deprecated and later removed from
    scikit-learn's RFECV -- confirm against the pinned sklearn version.
    """
    selector = RFECV(pipeline, step=step, cv=cv, scoring=scoring, estimator_params=estimator_params, verbose=verbose)
    selector.fit(X, y)
    return selector
def make_predictions(model, options, Xtest):
    """Apply the fitted transforms recorded in *options* to Xtest, in the
    fixed order tfv -> svd -> scl -> select, then return model.predict().

    options: dict that may contain fitted transformers under the keys
        'tfv', 'svd', 'scl', 'select'; each present one is applied in turn.

    Fix: dict.has_key() only exists in Python 2; the `in` operator is
    equivalent and works on both Python 2 and 3.
    """
    for key in ('tfv', 'svd', 'scl', 'select'):
        if key in options:
            Xtest = options[key].transform(Xtest)
    return model.predict(Xtest)
| 33.87013 | 114 | 0.755752 |
be3045744eceafafab62eacefd65e9b627f1ad2b
| 1,057 |
py
|
Python
|
Theories/DataStructures/BinaryTree/PopulatingNextRightPointersEachNode/populate_next_right_pointers_each_node.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Theories/DataStructures/BinaryTree/PopulatingNextRightPointersEachNode/populate_next_right_pointers_each_node.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Theories/DataStructures/BinaryTree/PopulatingNextRightPointersEachNode/populate_next_right_pointers_each_node.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
# Recursive
def connect(root: 'Node') -> 'Node':
    """Wire each node's `next` pointer to its right neighbour on the same level.

    The traversal visits the RIGHT subtree before the LEFT one, so for every
    depth the node most recently recorded is always the neighbour immediately
    to the right of the node currently being visited.
    """
    last_seen = {}  # depth -> single-slot stack holding the next-right candidate

    def walk(node: 'Node', depth: int) -> 'Node':
        if not node:
            return Node()  # throwaway sentinel, preserved from the original
        if depth not in last_seen:
            last_seen[depth] = [None]
        node.next = last_seen[depth].pop()
        last_seen[depth].append(node)
        walk(node.right, depth + 1)
        walk(node.left, depth + 1)

    walk(root, 0)
    return root
# Iterative O(1) space
# def connect(root: 'Node') -> 'Node':
# curr = root
# while curr and curr.left:
# left = curr.left
# while curr:
# curr.left.next = curr.right
# curr.right.next = curr.next.left if curr.next else None
# curr = curr.next
# curr = left
# return root
| 27.815789 | 101 | 0.558184 |
fe610d59722277e24965e2dde127c90dc8c0ec8a
| 1,106 |
py
|
Python
|
finbyz_dashboard/finbyz_dashboard/report/purchase_order_trends.py
|
finbyz/finbyz_dashboard
|
9c58ab7bccf589bc010d0e5bf95b20cadd4d71f0
|
[
"MIT"
] | 1 |
2021-11-19T05:27:11.000Z
|
2021-11-19T05:27:11.000Z
|
finbyz_dashboard/finbyz_dashboard/report/purchase_order_trends.py
|
finbyz/finbyz_dashboard
|
9c58ab7bccf589bc010d0e5bf95b20cadd4d71f0
|
[
"MIT"
] | null | null | null |
finbyz_dashboard/finbyz_dashboard/report/purchase_order_trends.py
|
finbyz/finbyz_dashboard
|
9c58ab7bccf589bc010d0e5bf95b20cadd4d71f0
|
[
"MIT"
] | 2 |
2021-08-21T10:41:38.000Z
|
2021-11-19T05:27:13.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.controllers.trends import get_columns,get_data
def execute(filters=None):
	"""Script-report entry point for the Purchase Order Trends report.

	Returns the standard Frappe report tuple:
	(columns, rows, message, chart_data).
	"""
	if not filters: filters ={}
	# Removed a dead `data = []` initialisation that was immediately
	# overwritten by get_data() below.
	# get_columns() returns a dict holding both the column list and the
	# SQL conditions used by get_data().
	conditions = get_columns(filters, "Purchase Order")
	data = get_data(filters, conditions)
	chart_data = get_chart_data(data, filters)
	return conditions["columns"], data, None, chart_data
def get_chart_data(data, filters):
	"""Build a bar-chart dict (total amount per leading column).

	When a group_by filter is active, only consolidated rows (non-empty
	first cell) are charted, ranked by the trailing total, capped at ten.
	Returns [] for empty input (the caller tolerates both shapes).
	"""
	if not data:
		return []
	labels = []
	datapoints = []
	if filters.get("group_by"):
		# Keep consolidated rows only, rank by the trailing total column,
		# and chart at most the ten largest.
		consolidated = [row for row in data if row[0]]
		consolidated.sort(key=lambda row: row[-1], reverse=True)
		if len(consolidated) > 10:
			consolidated = consolidated[:10]
		data = consolidated
	for row in data:
		labels.append(row[0])
		datapoints.append(row[-1])
	return {
		"data": {
			"labels" : labels,
			"datasets" : [
				{
					"name": _("Total Amount"),
					"values": datapoints
				}
			]
		},
		"type" : "bar",
		"colors":["#5e64ff"]
	}
| 22.12 | 68 | 0.673599 |
22fa1d57675b34cc8bd00b36fc11b9faccb37e3d
| 1,284 |
py
|
Python
|
0101symmetric-tree.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0101symmetric-tree.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0101symmetric-tree.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val, self.left, self.right = x, None, None
class Solution:
    """Iterative check that a binary tree is a mirror of itself.

    Uses a list as a double-ended worklist: the front walks the left
    subtree while the back walks the right subtree in the opposite order,
    so each front/back pair must match for the tree to be symmetric.
    """

    def isSymmetric(self, root):
        if root is None:
            return True
        pairs = [root.left, root.right]
        while pairs:
            outer_left = pairs.pop(0)   # front of the worklist
            outer_right = pairs.pop()   # back of the worklist
            if outer_left and outer_right:
                if outer_left.val != outer_right.val:
                    return False
                # Push children so the mirrored ordering is kept:
                # front grows left-subtree nodes, back grows right-subtree.
                pairs.insert(0, outer_left.right)
                pairs.insert(0, outer_left.left)
                pairs.append(outer_right.left)
                pairs.append(outer_right.right)
            elif outer_left or outer_right:
                # Exactly one side present -> shapes differ.
                return False
        return True
if __name__ == "__main__":
    # Build the symmetric tree [1, 2, 2, 3, 4, 4, 3] and verify it.
    a1, a2, a3, a4, a5, a6, a7 = (TreeNode(v) for v in (1, 2, 2, 3, 4, 4, 3))
    a1.left, a1.right = a2, a3
    a2.left, a2.right = a4, a5
    a3.left, a3.right = a6, a7
    print(Solution().isSymmetric(a1))
| 24.226415 | 53 | 0.479751 |
fe02376a9eac4f1480509625457317b840533b2b
| 1,825 |
py
|
Python
|
bos/txt2csv.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 7 |
2015-10-02T15:43:56.000Z
|
2021-12-07T11:20:46.000Z
|
bos/txt2csv.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 22 |
2015-11-06T14:33:55.000Z
|
2022-03-31T11:48:11.000Z
|
bos/txt2csv.py
|
DennisTobola/jskos-data
|
d32837447b01498fc4c05c49c534d4ab4faae35f
|
[
"CC0-1.0"
] | 4 |
2018-10-17T13:06:57.000Z
|
2022-03-14T08:46:35.000Z
|
#!/usr/bin/env python3
import re
import csv
from namedentities import unicode_entities
# Open sys.csv for writing and emit the CSV header row
csvfile = open('sys.csv', 'w', newline = '')
csvwriter = csv.writer(csvfile)
csvwriter.writerow(("level","notation","prefLabel"))
# Notations that were already written (used to detect duplicates)
seen = set()
# Fields of the record currently being parsed (filled by the main loop below)
record = {}
def process_record():
    """Write the accumulated *record* to the CSV output, then reset it.

    Records are skipped when a required field (hie/syt/syn) is missing,
    when the notation was already emitted, or when it is a synthetic
    "xxx 000" placeholder. The first time a three-letter base notation
    appears, a level-0 top row is written for it as well.
    """
    global record
    global csvwriter
    if record:
        if "hie" in record and "syt" in record and "syn" in record:
            notation = record["syt"]
            if notation in seen:
                print("Repeated Notation:", notation)
            elif not re.search('^[a-z]{3} 000$', notation):  # e.g. "all 000"
                seen.add(notation)
                label = record["syn"]
                level = int(record["hie"])
                base = notation[0:3]
                if base in top:
                    # Emit the top-level row once per base, then forget it.
                    csvwriter.writerow((0, base, label))
                    top.pop(base)
                csvwriter.writerow((level, notation, label))
    record = {}
# Read top.csv into a dictionary (base notation -> label).
top = {}
# Bug fix: the reader previously reused the name `csvfile`, shadowing the
# open sys.csv writer handle; the final csvfile.close() then closed the
# wrong (already-closed) file. A distinct name keeps the writer reachable.
with open('top.csv', 'r') as topfile:
    reader = csv.reader(topfile, delimiter=',')
    for row in reader:
        notation = row[0]
        label = row[1]
        top[notation] = label
# Conversion: parse sys.txt line by line into records
with open("sys.txt", "r") as ins:
    for line in ins:
        # "#key: value" lines add a field to the current record
        match = re.search("^#([^:]+): (.+)", line)
        if (match):
            key = match.group(1)
            value = unicode_entities(match.group(2))
            record[key] = value
        elif bool(re.findall("SET", line)):
            # a "SET" line marks the end of a record
            process_record()
# Emit the final record as well
process_record()
csvfile.close()
| 29.435484 | 76 | 0.546849 |
fe0bdad5faf27282abb24a685616dfeae353996d
| 637 |
py
|
Python
|
pacman-arch/test/pacman/tests/replace102.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/replace102.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/replace102.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Sysupgrade with a versioned replacement, original stays"

# New python2 package that replaces/conflicts with old python-yaml (< 5)
py2_pkg = pmpkg("python2-yaml", "5-1")
py2_pkg.replaces = ["python-yaml<5"]
py2_pkg.conflicts = ["python-yaml<5"]
py2_pkg.files = ["lib/python2/file"]
self.addpkg2db("sync", py2_pkg)

# the python3 version
py3_pkg = pmpkg("python-yaml", "5-1")
py3_pkg.files = ["lib/python3/file"]
self.addpkg2db("sync", py3_pkg)

# Locally installed old package that both sync candidates relate to
installed = pmpkg("python-yaml", "4-1")
installed.files = ["lib/python2/file"]
self.addpkg2db("local", installed)

self.args = "-Su"

# Expect: upgrade succeeds, old name replaced by python2-yaml 5-1
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=python-yaml")
self.addrule("PKG_VERSION=python2-yaml|5-1")
self.addrule("FILE_EXIST=lib/python2/file")
60bce6adb4c4576ccc410124b6676cb03c1d2123
| 1,359 |
py
|
Python
|
test/test_rohmu_dates.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 731 |
2018-06-01T21:48:43.000Z
|
2022-03-29T08:21:42.000Z
|
test/test_rohmu_dates.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 124 |
2018-06-19T05:59:50.000Z
|
2022-03-31T18:17:59.000Z
|
test/test_rohmu_dates.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 64 |
2018-06-26T14:12:53.000Z
|
2022-03-20T07:33:33.000Z
|
"""
rohmu test case
Copyright (c) 2017 Ohmu Ltd
See LICENSE for details
"""
import datetime
import re
import dateutil.tz
from pghoard.rohmu.dates import parse_timestamp
def test_parse_timestamp():
    """parse_timestamp handles named local zones, naive output, and unknown zone names."""
    now_local = datetime.datetime.now(dateutil.tz.tzlocal())
    # Split e.g. "2021-02-08T09:58:27.988218-05:00" into date and naive time,
    # then rebuild a "<date>T<time> <TZNAME>" string.
    date_part, time_with_offset = now_local.isoformat().split("T", 1)
    naive_time = re.split("[+-]", time_with_offset, maxsplit=1)[0]
    named = "{}T{} {}".format(date_part, naive_time, now_local.tzname())
    assert parse_timestamp(named) == now_local

    naive = parse_timestamp(named, with_tz=False, assume_local=True)
    assert naive == now_local.replace(tzinfo=None)

    # Unknown zone names default to UTC...
    unknown = "2017-02-02 12:00:00 XYZ"
    as_utc = parse_timestamp(unknown)
    assert as_utc.tzinfo == datetime.timezone.utc
    assert as_utc.isoformat() == "2017-02-02T12:00:00+00:00"

    # ...unless assume_local is set (only assertable in a known local zone).
    if now_local.tzname() in ["EET", "EEST"]:
        as_local = parse_timestamp(unknown, assume_local=True)
        assert as_local.tzinfo == dateutil.tz.tzlocal()
        assert as_local.isoformat() == "2017-02-02T12:00:00+02:00"
| 37.75 | 103 | 0.745401 |
71b24acd2e6f3dda9e4ad9c895364b9a583826ef
| 216,404 |
py
|
Python
|
samuri6.py
|
Redsamuribot/vpssamu
|
a8bdeb76d577536e064b761b6543b626c5900088
|
[
"MIT"
] | null | null | null |
samuri6.py
|
Redsamuribot/vpssamu
|
a8bdeb76d577536e064b761b6543b626c5900088
|
[
"MIT"
] | null | null | null |
samuri6.py
|
Redsamuribot/vpssamu
|
a8bdeb76d577536e064b761b6543b626c5900088
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia, goslate
import timeit
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
import six
if (six.PY2):
import urllib2
import urllib
else:
import urllib.request
import urllib.parse
cl = LINETCR.LINE()
# Authenticate with a LINE auth token (left blank here; fill in before running)
cl.login(token='')
cl.loginResult()
print "=====[Login Success]====="
# Python 2 only: re-expose setdefaultencoding and force UTF-8 text handling
reload(sys)
sys.setdefaultencoding('utf-8')
# Help menu text, page 1: self/personal commands (Thai UI strings)
helpmsg ="""╔═════════════════
╠ Selft Command
╠➩〘$,Me〙
╠➩〘เปลี่ยนชื่อ: 〙
╠➩〘เปลี่ยนตัส: 〙
╠➩〘ชื่อ〙
╠➩〘ตัส〙
╠➩〘รูปโปร〙
╠➩〘รูปปก〙
╠➩〘เช็คบอท〙
╠➩〘Sp/Speed〙
╠➩〘แนะนำตัว〙
╠➩〘ไอดี @〙
╠➩〘รูปโปร @〙
╠➩〘คท @〙
╠➩〘ข้อมูล @〙
╠➩〘ชื่อ @〙
╠➩〘ตัส @〙
╠➩〘รูปโปร @〙
╠➩〘รูปปก @〙
╠➩〘ใครแทค/แจ๊ะ〙
╠➩〘เปิด/ปิดสแกน〙
╠➩〘จับ/อ่าน〙
╠➩〘หวด @〙
╠➩〘ปลิว:〙
╠➩〘เช็ค:〙
╠➩〘Bot on/off〙
╠➩〘จับ〙
╠➩〘อ่าน〙
╠➩〘กันรัน〙
╠➩〘ลบรัน〙
╠➩〘ลบแชท〙
╠➩〘Mimic on/off〙
╠➩〘Micadd @〙
╠➩〘Micdel @〙
╠
╠ 〘Help1-3〙
╚════════════════════════════════
"""
# Help menu text, page 2: settings toggles
helpset ="""╔═════════════════
╠ Setting Command
╠➩〘My simisimi on/off〙
╠➩〘เปิด/ปิดกัน〙
╠➩〘เปิด/ปิดกันลิ้ง〙
╠➩〘เปิด/ปิดกันเชิญ〙
╠➩〘เปิด/ปิดกันยก〙
╠➩〘เปิด/ปิดแทคเจ็บ〙
╠➩〘เปิด/ปิดคท〙
╠➩〘เปิด/ปิดเข้า〙
╠➩〘เปิด/ปิดออก〙
╠➩〘เปิด/ปิดแอด〙
╠➩〘Like me〙
╠➩〘Like friend〙
╠➩〘เปิด/ปิดไลค์〙
╠➩〘เปิด/ปิดเม้น〙
╠➩〘เปิด/ปิดแทค〙
╠➩〘เปิด/ปิดอ่าน〙
╠➩〘เปิด/ปิดแทครูป〙
╠➩〘เปิด/ปิดก๊อก〙
╠➩〘เปิด/ปิดแสกน〙
╚═════════════════
"""
# Help menu text, page 3: group-management commands
helpgrup ="""╔═════════════════
╠ Group Command
╠➩〘เปิด/ปิดลิ้ง〙
╠➩〘ลอคชื่อ/ปิดลอค〙
╠➩〘ลิ้ง〙
╠➩〘แซว〙
╠➩〘แอด〙
╠➩〘แอด.ทั้งห้อง〙
╠➩〘รายชื่อสมาชิก〙
╠➩〘หวด @〙
╠➩〘ทดสอบ @〙
╠➩〘ข้อมูล @〙
╠➩〘เปลี่ยนชื่อกลุ่ม: 〙
╠➩〘ข้อมูลกลุ่ม〙
╠➩〘รูปกลุ่ม〙
╠➩〘ลิ้งรูปกลุ่ม〙
╠➩〘ไอดีกลุ่ม〙
╠➩〘รายชื่อกลุ่ม〙
╠➩〘รายชื่อเพื่อน〙
╠➩〘บัญชีดำ〙
╠➩〘แบน @〙
╠➩〘ล้างแบน @〙
╠➩〘เครีย์แบน〙
╠➩〘เช็คดำ〙
╠➩〘เช็คบล็อค〙
╠➩〘คทแบน〙
╠➩〘ไอดีแบน〙
╠➩〘#BanAll〙
╠➩〘#UnbanAll〙
╚═════════════════
"""
# Help menu text, page 4: social-media / translation commands
helpmed ="""╔═════════════════
╠ Social Media Command
╠➩〘ปฏิทิน〙
╠➩〘อู้-Id〙
╠➩〘อู้-En〙
╠➩〘อู้-Jp〙
╠➩〘อู้-Ko〙
╠➩〘th-id〙
╠➩〘th-en〙
╠➩〘th-jp〙
╠➩〘th-ko〙
╠➩〘th@id〙
╠➩〘th@en〙
╠➩〘th@jp〙
╠➩〘th@ko〙
╠➩〘th@ar〙
╠➩〘say-id〙
╠➩〘say-en〙
╠➩〘say-jp〙
╠➩〘say-ko〙
╠➩〘say-Th〙
╠➩〘S.ay (ข้อความ)〙
╠➩〘ไอ.จี (ชื่อยูส)〙
╠➩〘เฟส.บุค〙
╠➩〘ส่อง.เฟส (ชื่อเฟส) 〙
╠➩〘wiki.pedia (ข้อความ)〙
╠➩〘Twit.ter (ชนิดหรือชื่อทวิท)〙
╠➩〘sm.ule (ข้อความ)〙
╠➩〘gi.thub (ข้อความ)〙
╠➩〘วี.ดีโอ (ชื่อวีดี.โอ)〙
╠➩〘เพล.สโต (ชื่อแอพ)〙
╠➩〘รู.ป (ชื่อรูป)〙
╠➩〘กู.เกิ้ล (ข้อความ)〙
╠➩〘ยู.ทูป (ข้อความ)〙
╚═════════════════
"""
# Mutable protection/automation state shared across handlers
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
# List of client handles (single account here)
KAC = [cl]
# This account's mid, treated as a "bot" identity by the handlers
mid = cl.getProfile().mid
Bots = [mid]
# Hard-coded privileged user ids (owner/creator of this bot instance)
admin = "ub5abe828cd964292195c3c59d6322033"
Creator = "ub5abe828cd964292195c3c59d6322033"
# Central feature-toggle / state dictionary consulted by the event handlers.
# Bug fix: the literal previously contained duplicate keys "cNames" and
# "pname"; Python silently keeps only the LAST occurrence, so the shadowed
# first entries were dead text and have been removed. Values are unchanged.
wait = {
"likeOn":False,
"alwayRead":False,
"detectMention":True,
'Tagvirus':False,
"kickMention":False,
"steal":False,
'stiker':False,
"gift":False,
'pap':{},
'invite':{},
"spam":{},
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":50},
'BotCancel':False,
"AutoJoinCancel":True,
"memberscancel":10,
"Members":1,
'leaveRoom':False,
"Selfbot":False,
'timeline':True,
'autoAdd':False,
'autoBlock':True,
'AutoKick':False,
# Auto-reply sent when someone adds this account (Thai)
'message':"""🌾(●´з`)♡🌹แอดมาทำไมคับ 🌸แอดมาจีบรึแอดมารัน🌹(´ε` )♡🌾""",
"lang":"JP",
# Comment text posted on timeline posts when commentOn is enabled
"comment":"""
🌟
🚩🔱🚩
👍AutoLike by👍
🌾RED BOT LINE THAILAND🌾
─┅═✥👊ᵀᴴᴬᴵᴸᴬᴺᴰ👊✥═┅─
🎎 💀[RED SAMURI SELFBOT]💀 🎎
╔══╗────────╔╗────────────────
║═╦╝╔═╗─╔══╗╠╣╔╗─╔╦╗ ╔══╗─╔╦╗
║╔╝─║╬╚╗║║║║║║║╚╗║║║ ║║║║ ║║║
╚╝──╚══╝╚╩╩╝╚╝╚═╝╠╗║ ╚╩╩╝ ╠╗║
─────────────────╚═╝───── ╚═╝──""",
"commentOn":False,
"commentBlack":{},
"comment1":"""
▄▄▄RED SAMURI SELFBØT▄▄▄
█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█
█ 🌾RED BOT LINE THAILAND🌾
█ ─┅═✥👊ᵀᴴᴬᴵᴸᴬᴺᴰ👊✥═┅─
█ 💀 [ RED SAMURI BOT] 💀
█ 💀💀💀💀💀💀💀💀💀
█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█ """,
"wblack":False,
"dblack":False,
"clock":False,
# Decorative clock-name suffix (formerly duplicated; last value kept)
"cNames":" ─┅͜͡✥ه﷽ Red﷽ه✥͜͡",
"winvite":False,
"Sambutan":True,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
# Name-lock state per group (formerly duplicated; last value {} kept)
"pname":{},
"pro_name":{},
"Sider":{},
"Backup":{},
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"copy":False,
"copy2":False,
"status":False,
"BlGroup":{}
}
# Per-group read-tracking state (read checkpoint, members seen, timers)
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
# Mimic mode: when enabled, messages from targeted users are echoed back
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
# Per-group toggle for the SimSimi chatbot auto-reply feature
settings = {
"simiSimi":{}
}
# "CCTV" lurker detection: per-group toggle, checkpoint, and seen names
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
# NOTE(review): the first assignment is immediately shadowed by the alias
# to wait2['setTime'] on the next line; both names refer to the same dict.
setTime = {}
setTime = wait2['setTime']
# Script start time (used by uptime/speed commands)
mulai = time.time()
contact = cl.getProfile()
# Working snapshot of the current profile (name / status / picture)
profile = cl.getProfile()
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = cl.getProfile()
# Second snapshot kept as a restorable backup of the original profile
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
    """Re-exec the current script in place, preserving argv (never returns)."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def download_page(url):
    """Fetch *url* with a desktop-browser User-Agent and return the body.

    Works on both Python 2 and Python 3. On any failure it returns the
    string "Page Not found".

    Bug fix: the Python 3 branch previously printed the error and fell
    through, implicitly returning None; it now returns "Page Not found"
    like the Python 2 branch.
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:
        # Python 3 path
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
            return "Page Not found"
    else:
        # Python 2 path (urllib2 only exists there)
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except Exception:
            return "Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+1)
end_content = s.find(',"ow"',start_content+1)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
    """Collect every image URL found in *page*, in order of appearance."""
    found = []
    while True:
        url, cut = _images_get_next_item(page)
        if url == "no_links":
            break
        found.append(url)
        time.sleep(0.1)  # keep the original pacing between extractions
        page = page[cut:]
    return found
def upload_tempimage(client):
    """Upload the file at the module-level ``image_path`` through *client*.

    Relies on the globals ``album`` and ``image_path`` being set by the
    caller before this runs.
    """
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    uploaded = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
def summon(to, nama):
    # Build a LINE "mention" message for every mid in *nama*: `aa`
    # accumulates the JSON MENTIONEES entries (S/E are character offsets
    # into the message text), `bb` the visible "@x" rows. Offsets start at
    # 14, the length of the box-drawing header line plus its newline.
    aa = ""
    bb = ""
    strt = int(14)
    akh = int(14)
    nm = nama
    for mm in nm:
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        # each "@x" row advances the next start by 6 and end by 4 characters
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x95\xa0 @x \n"
    # drop the trailing comma from the accumulated JSON fragments
    aa = (aa[:int(len(aa)-1)])
    msg = Message()
    msg.to = to
    # box-drawing header line + mention rows + footer line
    msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    print "[Command] Tag All"
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print error
def waktu(secs):
    """Format a duration in seconds as 'HH ชั่วโมง MM นาที SS วินาที ' (Thai)."""
    hours, remainder = divmod(secs, 3600)
    mins, secs = divmod(remainder, 60)
    return '%02d ชั่วโมง %02d นาที %02d วินาที ' % (hours, mins, secs)
def sendImage(self, to_, path):
    """Send a local image file to *to_* via the LINE object-storage upload."""
    # A placeholder message is sent first; its id names the uploaded object.
    placeholder = Message(to=to_, contentType=1)
    placeholder.contentMetadata = None
    placeholder.contentPreview = None
    object_id = self._client.sendMessage(placeholder).id
    upload_files = {
        'file': open(path, 'rb'),
    }
    upload_params = {
        'name': 'media',
        'oid': object_id,
        'size': len(open(path, 'rb').read()),
        'type': 'image',
        'ver': '1.0',
    }
    form = {
        'params': json.dumps(upload_params)
    }
    resp = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=form, files=upload_files)
    if resp.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImageWithURL(self, to_, url):
    """Download *url* to a temp file, then send it as an image."""
    tmp = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    resp = requests.get(url, stream=True)
    if resp.status_code != 200:
        raise Exception('Download image failure.')
    with open(tmp, 'w') as fh:
        shutil.copyfileobj(resp.raw, fh)
    try:
        self.sendImage(to_, tmp)
    except Exception as e:
        raise e
def downloadFileWithURL(self, fileUrl):
    """Fetch *fileUrl* via the client and return the temp path it was saved to."""
    target = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    resp = self.get_content(fileUrl)
    if resp.status_code != 200:
        raise Exception('Download file failure.')
    with open(target, 'wb') as fh:
        shutil.copyfileobj(resp.raw, fh)
    return target
def sendAudio(self, to_, path):
    """Upload and send a local audio file to *to_*."""
    placeholder = Message(to=to_, text=None, contentType=3)
    placeholder.contentMetadata = None
    placeholder.contentPreview = None
    sent = self.Talk.client.sendMessage(0, placeholder)
    object_id = sent.id
    upload_files = {
        'file': open(path, 'rb'),
    }
    upload_params = {
        'name': 'media',
        'oid': object_id,
        'size': len(open(path, 'rb').read()),
        'type': 'audio',
        'ver': '1.0',
    }
    form = {
        'params': json.dumps(upload_params)
    }
    resp = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=form, files=upload_files)
    if resp.status_code != 201:
        raise Exception('Upload audio failure.')
    return True
def sendAudioWithUrl(self, to_, url):
    """Download *url* to a temp file and send it as audio.

    Bug fix: the temp-path pattern used '%1.data', which printf parses as
    the conversion '%1.d' (precision 0 — a random digit of 0 vanishes
    entirely) followed by the literal "ata". '%i.data' matches the sibling
    send*WithURL helpers and always yields 'pythonLine-<digit>.data'.
    """
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'w') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise (e)
def sendVideo(self, to_, path):
    """Upload and send a local video file to *to_*."""
    placeholder = Message(to=to_, contentType=2)
    # Length/duration are unknown at this point; zeros are accepted.
    placeholder.contentMetadata = {
        'VIDLEN': '0',
        'DURATION': '0'
    }
    placeholder.contentPreview = None
    object_id = self.Talk.client.sendMessage(0, placeholder).id
    upload_files = {
        'file': open(path, 'rb'),
    }
    upload_params = {
        'name': 'media',
        'oid': object_id,
        'size': len(open(path, 'rb').read()),
        'type': 'video',
        'ver': '1.0',
    }
    form = {
        'params': json.dumps(upload_params)
    }
    resp = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=form, files=upload_files)
    if resp.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendVideoWithURL(self, to_, url):
    """Download *url* and send it as a video.

    Bug fix: the failure message previously said 'Download Audio failure.'
    in this video helper; aligned with the sibling helpers' wording.
    NOTE(review): unlike the other send*WithURL helpers this writes to a
    fixed relative path instead of tempfile.gettempdir() — confirm intent
    before changing that behaviour.
    """
    path = 'pythonLines.data'
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'w') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download video failure.')
    try:
        self.sendVideo(to_, path)
    except Exception as e:
        raise e
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    """Return True if *string* exactly equals one of *commands*, else False.

    Bug fix / simplification: the original looped over a list of prefix
    tokens whose loop variable was never used, re-scanning *commands* once
    per token; a single membership test is behaviourally identical.
    """
    return string in commands
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Construct a Message addressed to *to* and bump its per-target counter.

    NOTE(review): the message is built and counted in ``messageReq`` but is
    not dispatched here — presumably sent elsewhere; confirm against callers.
    """
    msg = Message()
    msg.to = to
    msg.from_ = profile.mid
    msg.text = text
    msg.contentType = contentType
    msg.contentMetadata = contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 0:
return
if op.type == 5:
if wait["autoBlock"] == True:
cl.blockContact(op.param1)
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nรู้นะว่าอ่านอยู่. . .\nออกมาคุยเดี๋ยวนี้ (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nนี่ก็อีกคน. . .อ่านอย่างเดียวเลย\nไม่ออกมาคุยล่ะ (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nแอบกันจังเลยนะ???\nคิดว่าเป็นนินจารึไง...??😆😆 ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
cl.leaveRoom(op.param1)
if op.type == 21:
cl.leaveRoom(op.param1)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"Maaf " + cl.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["AutoCancel"] == True:
if op.param3 in Bots:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Creator in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,"สวัสดี " + cl.getContact(op.param2).displayName + "\nยินดีต้อนรับเข้าสู่กลุ่ม ☞ " + str(ginfo.name) + " ☜" + "\nเข้ามาแล้วอย่าลืมดูที่โน๊ตกลุ่มด้วยนะ\nอย่าลืมปิดเสียงแจ้งเตือนด้วยล่ะ ^_^")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "23701829",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendMessage(d)
print "MEMBER JOIN TO GROUP"
if op.type == 19:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "คืออยังมันโหดแท้ว่ะ(|||゚д゚)")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "22832861",
"STKPKGID": "1705396",
"STKVER": "1" }
cl.sendMessage(d)
print "MEMBER KICK OUT FORM GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,"Goodbye.. " + cl.getContact(op.param2).displayName + "\nแล้วเจอกันใหม่นะ. . . (p′︵‵。) 🤗")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "23701835",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendAudio(msg.to,'tts.mp3')
cl.sendMessage(d)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if op.type == 25:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment"])
if op.type == 26:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment"])
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
cl.findAndAddContactsByMid(invite)
cl.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if op.type == 25:
msg = op.message
if msg.text in ["Bot on"]:
wait["Selfbot"] = True
cl.sendText(msg.to,"Selfbot Sudah On Kembali.")
if op.type == 25:
if wait["Selfbot"] == True:
msg = op.message
if op.type in [26,25]:
msg = op.message
if msg.contentType == 7:
if wait['stiker'] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "[Stiker Check] \nSTKID : %s\nSTKPKGID : %s \nSTKVER : %s\n =>> Link...\nline://shopdetail/%s"%(stk_id,pkg_id,stk_ver,pkg_id)
cl.sendText(msg.to, filler)
else:
pass
if op.type == 26:
msg = op.message
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['Tagvirus'] == True:
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
msg.contentType = 13
msg.contentMetadata = {'mid': "JANDA'"}
cl.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["ว่าไงคับน้องสาว? " + cName + "มีอะไรให้ผมรับใช้คับ😂😂",cName + " แทคทำไมมิทราบ? มีอิโรยก๊ะว่ามา",cName + " แทคบ่อยๆเดะจับทำเมียนะ -..-","หยุดแทคสักพัก" + cName + " แล้วมาพบรักที่หลังแชท😝😝","😎😎😎\nคับ มีไรคับ " + cName, "ยังไม่ว่าง เดี๋ยวมาตอบนะ " + cName, "ไม่อยู่ ไปทำธุระ " + cName + "มีไรทิ้งแชทไว้ที่แชท.สตนะ?", "อ่ะ เอาอีกแระ " + cName + "แทคตมอย??????????????????","ป๊าาาด " + cName + " คุณนายคับ จะแทคทำไมคับ!"]
balas1 = "รูปภาพคนแทค. . ."
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendText(msg.to,balas1)
cl.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "23701825",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendMessage(msg)
jawaban1 = ("มีอะไรครับ แทคแล้วไม่พูดจับรันนะ ห้าห้าห้าห้า")
tts = gTTS(text=jawaban1, lang='th')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
break
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.sendText(msg.to,"Gift Sudah Terkirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"ชื่อ :\n" + contact.displayName + "\n\nไอดี :\n" + msg.contentMetadata["mid"] + "\n\nสเตตัส :\n" + contact.statusMessage)
cl.sendText(msg.to,"รูปโปรไฟล " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"รูปปก " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Nothing")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Not in Blacklist")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "ลิ้งโพส URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpmsg)
else:
cl.sendText(msg.to,helpmsg)
elif msg.text.lower() == 'help3':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpgrup)
else:
cl.sendText(msg.to,helpgrup)
elif msg.text.lower() == 'help2':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpset)
else:
cl.sendText(msg.to,helpset)
elif msg.text.lower() == 'help1':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpmed)
else:
cl.sendText(msg.to,helpmed)
elif msg.text.lower() == 'speed':
cl.sendText(msg.to, "「Speed My SelfBot」")
start = time.time()
time.sleep(0.07)
elapsed_time = time.time() - start
cl.sendText(msg.to, "☞「 ความเร็วของเซลบอท 」\n☞ Type: Speed\n☞ Speed : %sseconds" % (elapsed_time))
elif msg.text.lower() == 'sp':
cl.sendText(msg.to, "「Speed My SelfBot」")
start = time.time()
time.sleep(0.07)
elapsed_time = time.time() - start
cl.sendText(msg.to, "☞「 ความเร็วของเซลบอท 」\n☞ Type: Speed\n☞ Speed : %sseconds" % (elapsed_time))
elif msg.text.lower() == 'crash':
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925',"}
cl.sendMessage(msg)
elif msg.text.lower() == '$':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
contact = cl.getContact(msg.contentMetadata["mid"])
cu = cl.channel.getCover(msg.contentMetadata["mid"])
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
jawaban1 = ("อุ๊ต๊ะอุ๊ต๊ะ ชิมิชิมิ อิ๊ขึอิ๊ขึ ตะมุตะมิ งุ้งงิ้งงุ้งงิ้ง")
cl.sendMessage(msg)
cl.sendText(msg.to,contact.displayName)
cl.sendText(msg.to,contact.statusMessage)
cl.sendText(msg.to,mid)
cl.sendImageWithURL(msg.to,image)
cl.sendImageWithURL(msg.to,path)
tts = gTTS(text=jawaban1, lang='th')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "22832843",
"STKPKGID": "1705396",
"STKVER": "1" }
cl.sendMessage(msg)
cl.sendText(msg.to,"SELFBOT BY: " + "\n" + str(wait["comment1"]))
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
contact = cl.getContact(msg.contentMetadata["mid"])
cu = cl.channel.getCover(msg.contentMetadata["mid"])
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
jawaban1 = ("ไม่ต้อง งงนะครับ มันเป็นความสามารถพิเศษ ห้าห้าห้าห้า อิ๊ขึอิ๊ขึ")
cl.sendMessage(msg)
cl.sendText(msg.to,contact.displayName)
cl.sendText(msg.to,contact.statusMessage)
cl.sendText(msg.to,mid)
cl.sendImageWithURL(msg.to,image)
cl.sendImageWithURL(msg.to,path)
tts = gTTS(text=jawaban1, lang='th')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "33158329",
"STKPKGID": "10788",
"STKVER": "1" }
cl.sendMessage(msg)
cl.sendText(msg.to,"SELFBOT BY: " + "\n" + str(wait["comment1"]))
elif msg.text.lower() == 'กังนัม':
msg.contentType = 7
msg.text = None
msg.contentMetadata={
"STKID": "33158332",
"STKPKGID": "10788",
"STKVER": "1" }
cl.sendMessage(msg)
            #============================ BOT COMMANDS ====================================#
#==============================================================================#
elif msg.text.lower() == 'เปิดคท':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to on")
else:
cl.sendText(msg.to,"contact already on")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to on")
else:
cl.sendText(msg.to,"contact already on")
elif msg.text.lower() == 'ปิดคท':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to off")
else:
cl.sendText(msg.to,"contact already off")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to off")
else:
cl.sendText(msg.to,"contact already off")
elif msg.text.lower() == 'ลอคชื่อ':
if msg.to in wait['pname']:
cl.sendText(msg.to,"Done..")
else:
cl.sendText(msg.to,"bone..")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif msg.text.lower() == 'ปิดลอค':
if msg.to in wait['pname']:
cl.sendText(msg.to,"Done..")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"bone..")
elif "ปิดเชิญ" == msg.text:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"Done..")
elif "เปิดเชิญ" == msg.text:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"Done..")
except:
pass
elif msg.text.lower() == 'เปิดกัน':
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to on")
else:
cl.sendText(msg.to,"Protection already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to on")
else:
cl.sendText(msg.to,"Protection already on")
elif msg.text.lower() == 'เปิดกันลิ้ง':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to on")
else:
cl.sendText(msg.to,"Protection Qr already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to on")
else:
cl.sendText(msg.to,"Protection Qr already on")
elif msg.text.lower() == 'เปิดกันเชิญ':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to on")
else:
cl.sendText(msg.to,"Protection Invite already on")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to on")
else:
cl.sendText(msg.to,"Protection Invite already on")
elif msg.text.lower() == 'เปิดกันยก':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection set to on")
else:
cl.sendText(msg.to,"Cancel Protection already on")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection set to on")
else:
cl.sendText(msg.to,"Cancel Protection already on")
elif msg.text.lower() == 'เปิดเข้า':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to on")
else:
cl.sendText(msg.to,"Autojoin already on")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to on")
else:
cl.sendText(msg.to,"Autojoin already on")
elif msg.text.lower() == 'ปิดเข้า':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to off")
else:
cl.sendText(msg.to,"Autojoin already off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to off")
else:
cl.sendText(msg.to,"Autojoin already off")
elif msg.text.lower() == 'ปิดกัน':
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to off")
else:
cl.sendText(msg.to,"Protection already off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to off")
else:
cl.sendText(msg.to,"Protection already off")
elif msg.text.lower() == 'ปิดกันลิ้ง':
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to off")
else:
cl.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to off")
else:
cl.sendText(msg.to,"Protection Qr already off")
elif msg.text.lower() == 'ปิดกันเชิญ':
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to off")
else:
cl.sendText(msg.to,"Protection Invite already off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to off")
else:
cl.sendText(msg.to,"Protection Invite already off")
elif msg.text.lower() == 'ปิดกันยก':
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Invite set to off")
else:
cl.sendText(msg.to,"Cancel Protection Invite already off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Invite set to off")
else:
cl.sendText(msg.to,"Cancel Protection Invite already off")
elif msg.text in ["เปิดบอทยก"]:
if msg.from_ in admin:
wait["BotCancel"] = True
cl.sendText(msg.to,"เปิดใช้ระบบบอทยกเชิญอัติโนมัติ")
print wait["BotCancel"]
else:
cl.sendText(msg.to,"คุณไม่มีสิทย์ใช้คำสั่งนี้(-..-)")
elif msg.text in ["ปิดบอทยก"]:
if msg.from_ in admin:
wait["BotCancel"] = False
cl.sendText(msg.to,"ปิดใช้ระบบบอทยกเชิญอัติโนมัติแล้ว")
print wait["BotCancel"]
else:
cl.sendText(msg.to,"คุณไม่มีสิทย์ใช้คำสั่งนี้(-..-)")
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
cl.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nilai tidak benar")
else:
cl.sendText(msg.to,"Weird value")
elif msg.text.lower() == 'เปิดออก':
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
elif msg.text.lower() == 'ปิดออก':
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
elif msg.text.lower() == 'เปิดแชร์':
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
elif msg.text.lower() == 'ปิดแชร์':
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
elif msg.text.lower() == 'เช็ค':
md = ""
if wait["contact"] == True: md+="Contact:on \n"
else: md+="Contact:off\n"
if wait["autoJoin"] == True: md+="Auto Join:on \n"
else: md +="Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+="Group cancel:off \n"
if wait["leaveRoom"] == True: md+="Auto leave:on \n"
else: md+="Auto leave:off \n"
if wait["BotCancel"] == True: md+="Bot cancel:on \n"
else: md+="Bot cancel:off \n"
if wait["timeline"] == True: md+="Share:on \n"
else:md+="Share:off \n"
if wait["autoAdd"] == True: md+="Auto add:on \n"
else:md+="Auto add:off \n"
if wait["autoBlock"] == True: md+="Auto Block:on \n"
else:md+="Auto Block:off \n"
if wait["protect"] == True: md+="Protect:on \n"
else:md+="Protect:off \n"
if wait["linkprotect"] == True: md+="Link Protect:on \n"
else:md+="Link Protect:off \n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on \n"
else:md+="Invitation Protect:off \n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on \n"
else:md+="Cancel Protect:off \n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': "ub5abe828cd964292195c3c59d6322033"}
cl.sendMessage(msg)
cl.sendText(msg.to,"By: RED SAMURI SELFBØT")
elif cms(msg.text,["ผส","ผู้สร้าง"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "ub5abe828cd964292195c3c59d6322033"}
cl.sendMessage(msg)
cl.sendText(mag.to,"Ini Creator Saya Jan Lupa Di add Ya kak")
elif msg.text.lower() == 'เปิดแอด':
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
elif msg.text.lower() == 'ปิดแอด':
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
elif msg.text in ["Block on","เปิดออโต้บล็อค","เปิดบล็อค"]:
if wait["autoBlock"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✔👎〘•ระบบออโต้บล็อค\nเปิดใช้งานอยุ่แล้ว•〙👍")
else:
cl.sendText(msg.to,"✔👎〘•เปิดใช้ระบบออโต้บล็อค\nเรียบร้อยแล้ว•〙👍")
else:
wait["autoBlock"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"✔👎〘•เปิดใช้ระบบออโต้บล็อค\nเรียบร้อยแล้ว•〙👍")
else:
cl.sendText(msg.to,"✔👎〘•ระบบออโต้บล็อค\nเปิดใช้งานอยุ่แล้ว•〙👍")
elif msg.text in ["AutoBlock off","ปิดออโต้บล็อค","ปิดบล็อค"]:
if wait["autoBlock"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"👎〘•ระบบออโต้บล็อค\nปิดใช้งานอยู่แล้ว•〙👎")
else:
cl.sendText(msg.to,"👎〘•ปิดระบบออโต้บล็อค\nเรียบร้อยแล้ว•〙👎")
else:
wait["autoBlock"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"👎〘•ปิดระบบออโต้บล็อค\nเรียบร้อยแล้ว•〙👎")
else:
cl.sendText(msg.to,"👎〘•ระบบออโต้บล็อค\nปิดใช้งานอยู่แล้ว•〙👎")
elif "ตั้งข้อความแอด:" in msg.text:
wait["message"] = msg.text.replace("Pesan set:","")
cl.sendText(msg.to,"เชิญทำการเปลี่ยนแปลงข้อความคนแอด")
elif msg.text.lower() == 'pesan cek':
if wait["lang"] == "JP":
cl.sendText(msg.to,"ข้อความเมื่อเพิ่มเพื่อนโดยอัตโนมัติตั้งไว้ดังนี้ \n\n" + wait["message"])
else:
cl.sendText(msg.to,"ปลี่ยนการตั้งค่า ข้อความคนแอด ของคุณแล้ว \n ดังนี้\n" + wait["message"])
elif "ตั้งคอมเม้น:" in msg.text:
c = msg.text.replace("คอมเม้น:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เปลี่ยนการตั้งค่าคอมเม้นของคุณแล้ว ดังนี้")
else:
wait["comment"] = c
cl.sendText(msg.to,"เปลี่ยนการตั้งค่าคอมเม้นของคุณแล้ว \n ดังนี้\n" + c)
elif msg.text in ["Com on","เปิดเม้น","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di")
else:
cl.sendText(msg.to,"To open")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Comment Actived")
else:
cl.sendText(msg.to,"Comment Has Been Active")
elif msg.text in ["ปิดเม้น"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off")
else:
cl.sendText(msg.to,"To turn off")
elif msg.text in ["เช็คเม้น","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"]))
elif msg.text in ["คอมเม้น","Comment"]:
cl.sendText(msg.to,"ข้อความแสดงความคิดเห็นอัตโนมัติถูกตั้งไว้ดังนี้:??\n\n" + str(wait["comment"]))
elif msg.text in ["ข้อความแอด","message"]:
cl.sendText(msg.to,"ข้อความตอบรับคนแอดถูกตั้งไว้ดังนี้:??\n\n" + str(wait["message"]))
elif msg.text in ["ดำ"]:
wait["wblack"] = True
cl.sendText(msg.to,"กรุณาส่ง คอนแทค บุคคลที่คุณต้องการเพิ่มในบัญชีดำ")
elif msg.text in ["ขาว"]:
wait["dblack"] = True
cl.sendText(msg.to,"กรุณาส่ง คอนแทค บุคคลที่คุณต้องการเพิ่มในบัญชีขาว")
elif msg.text in ["บัญชีดำ"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"ไม่มีอะไรในบัญชีดำ")
else:
cl.sendText(msg.to,"ต่อไปนี้เป็นรายชื่อที่อยู่ในบัญชีดำ")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Jam already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam set on")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Jam already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"Jam set off")
elif "Jam say:" in msg.text:
n = msg.text.replace("Jam say:","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Jam")
#==============================================================================#
#==============================================================================#
elif msg.text in ["Invite"]:
wait["winvite"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["ดึง"]:
wait["winvite"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Bot off"]:
wait["Selfbot"] = False
cl.sendText(msg.to,"Selfbot Sudah Di Nonaktifkan.")
elif msg.text in ["เช็คคท"]:
wait["contact"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
elif msg.text in ["Like on","เปิดไลค์"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["Like off","ปิดไลค์"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["เปิดติ๊ก","Sticker on"]:
if wait['stiker'] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Stiker Already On")
else:
cl.sendText(msg.to,"Stiker Already On")
else:
wait["stiker"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Stiker Already On")
else:
cl.sendText(msg.to,"Stiker Already On")
elif msg.text in ["ปิดติ๊ก","Sticker off"]:
if wait["stiker"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Stiker Already Off")
else:
cl.sendText(msg.to,"Stiker Already Off")
else:
wait["stiker"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Stiker Already Off")
else:
cl.sendText(msg.to,"Stiker Already Off")
elif msg.text in ["เปิดก๊อก"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดใช้ระบบแจ้งเตือนคนเข้าออกอยู่แล้ว")
else:
wait["Sambutan"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดใช้ระบบแจ้งเตือนคนเข้าออก")
elif msg.text in ["ปิดก๊อก"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดใช้ระบบแจ้งเตือนคนเข้าออกอยู่แล้ว")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดใช้ระบบแจ้งเตือนคนเข้าออกแล้ว")
#=============================================================================#
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
cl.sendText(msg.to,"Success activated simisimi")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
cl.sendText(msg.to,"Success deactive simisimi")
elif msg.text in ["Read on","เปิดอ่าน"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"Auto Sider ON")
elif msg.text in ["Read off","ปิดอ่าน"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"Auto Sider OFF")
elif msg.text in ["Autorespon on","เปิดแทค","Respon on","Respon:on"]:
wait["detectMention"] = True
wait["kickMention"] = False
wait["Tagvirus"] = False
cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["Autorespon off","ปิดแทค","Respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon OFF")
elif msg.text in ["เปิดแทคเจ็บ","Responkick on","Responkick:on"]:
wait["kickMention"] = True
wait["detectMention"] = False
wait["Tagvirus"] = False
cl.sendText(msg.to,"[AUTO RESPOND] Auto Kick yang tag ON")
elif msg.text in ["ปิดแทคเจ็บ","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"[AUTO RESPOND] Auto Kick yang tag ON")
elif msg.text in ["Tagvirus on","เปิดแทคดับ"]:
wait["Tagvirus"] = True
wait["detectMention"] = False
wait["kickMention"] = False
cl.sendText(msg.to,"[AUTO RESPOND] tagvirus yang tag OFF")
elif msg.text in ["Tagvirus off","ปิดแทคดับ"]:
wait["Tagvirus"] = False
cl.sendText(msg.to,"[AUTO RESPOND] tagvirus yang tag OFF")
#==============================================================================#
elif "Hay" in msg.text:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Hay","")
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[cl]
kicker=random.choice(klist)
random.choice(KAC).kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
sendMessage(msg.to,"Grup Dibersihkan")
elif ("หวด " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("ทดสอบ " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "ปลิว: " in msg.text.lower():
midd = msg.text.lower().replace("ปลิว: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif ('invite ' in msg.text):
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text.lower() == 'ยก':
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
elif msg.text.lower() == 'เปิดลิ้ง':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text.lower() == 'ปิดลิ้ง':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close")
else:
cl.sendText(msg.to,"URL close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Url","ลิ้ง"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
elif msg.text in ["Backup:on","Backup on","เปิดดึงกลับ"]:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบดึงคนกลับ\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off","Backup off","ปิดดึงกลับ"]:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบดึงคนกลับแล้ว\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text.lower() == 'เช็คบอท':
eltime = time.time() - mulai
start = time.time()
time.sleep(0.07)
elapsed_time = time.time() - start
van = "ระยะเวลาการทำงานของบอท : " + "\n" +waktu(eltime)
md = ""
if wait["contact"] == True: md+="Contact:on \n"
else: md+="Contact:off\n"
if wait["autoJoin"] == True: md+="Auto Join:on \n"
else: md +="Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+="Group cancel:off \n"
if wait["leaveRoom"] == True: md+="Auto leave:on \n"
else: md+="Auto leave:off \n"
if wait["BotCancel"] == True: md+="Bot cancel:on \n"
else: md+="Bot cancel:off \n"
if wait["timeline"] == True: md+="Share:on \n"
else:md+="Share:off \n"
if wait["autoAdd"] == True: md+="Auto add:on \n"
else:md+="Auto add:off \n"
if wait["autoBlock"] == True: md+="Auto Block:on \n"
else:md+="Auto Block:off \n"
if wait["protect"] == True: md+="Protect:on \n"
else:md+="Protect:off \n"
if wait["linkprotect"] == True: md+="Link Protect:on \n"
else:md+="Link Protect:off \n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on \n"
else:md+="Invitation Protect:off \n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on \n"
else:md+="Cancel Protect:off \n"
cl.sendText(msg.to,"ครับผม! บอทยังอยู่ครับ" + " \n" + van + "\n\n☞「 ความเร็วของเซลบอท ณ ตอนนี้อยู่ที่」\n☞ Speed : %sseconds" % (elapsed_time) + "\n\n🌴การตั้งค่าของบอทถูกตั้งไว้ดังนี้🌴" + "\n" + md)
msg.contentType = 13
msg.contentMetadata = {'mid': "ub5abe828cd964292195c3c59d6322033"}
cl.sendMessage(msg)
cl.sendText(msg.to,"By: RED SAMURI SELFBØT")
#================================ STARTED ==============================================#
elif cms(msg.text,["ผู้สร้าง","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53"}
cl.sendMessage(msg)
elif "แอด" == msg.text.lower():
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"Creator Grup")
elif msg.text.lower() == 'ดึง:แอด':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
cl.inviteIntoGroup(msg.to,[gcmid])
else:
cl.inviteIntoGroup(msg.to,[gcmid])
elif ("Gname: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gname: ","")
cl.updateGroup(X)
elif msg.text.lower() == 'ข้อมูลกลุ่ม':
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[ชื่อของกลุ่ม 👉 ]\n" + group.name + "\n\n[Iไอดีของกลุ่ม : ]\n" + group.id + "\n\n[ผู้สร้างกลุ่ม :]\n" + gCreator + "\n\n[รูปภาพของกลุ่ม : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nลิ้งของกลุ่ม : เปิด"
else: md += "\n\nลิ้งของกลุ่ม : ปิด"
if group.invitee is None: md += "\nจำนวนสมาชิก : " + str(len(group.members)) + " คน" + "\nจำนวนสมาชิกค้างเชิญ : 0 คน"
else: md += "\nจำนวนสมาชิก : " + str(len(group.members)) + " คน" + "\nจำนวนสมาชิกค้างเชิญ : " + str(len(group.invitee)) + " คน"
cl.sendText(msg.to,md)
elif msg.text.lower() == 'ไอดีกลุ่ม':
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#==============================================================================#
elif "เช็ค: " in msg.text:
saya = msg.text.replace("เช็ค: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
cl.sendMessage(msg)
contact = cl.getContact(saya)
cu = cl.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"ชื่อ. :\n" + contact.displayName + "\n\nสเตตัส. :\n" + contact.statusMessage)
cl.sendText(msg.to,"รูปโปร. " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"รูปปก. " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "เช็คกลุ่ม: " in msg.text:
saya = msg.text.replace("เช็คกลุ่ม: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).id
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "ชื่อกลุ่ม :\n" + group.name + "\n\nไอดีกลุ่ม :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nลิ้งกลุ่ม : ปิด"
else: md += "\n\nลิ้งกลุ่ม : เปิด"
if group.invitee is None: md += "\nจำนวนสมาชิก : " + str(len(group.members)) + " คน" + "\nจำนวนสมาชิกค้างเชิญ : " + str(len(group.invitee)) + " 0 คน"
else: md += "\nจำนวนสมาชิก : " + str(len(group.members)) + " คน" + "\nจำนวนสมาชิกค้างเชิญ : " + str(len(group.invitee)) + " 8o"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["รายชื่อเพื่อน"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["รายชื่อสมาชิก"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
elif ('Grupmember' in msg.text):
saya = msg.text.replace('Grupmember','')
gid = cl.getGroupIdsJoined()
num=1
msgs="═════════List Member═════════-"
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
me = gna.members(i)
msgs+="\n[%i] %s" % (num, me.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(me)
if h == saya:
cl.sendText(msg.to, msgs)
elif msg.text in ["Friendlistmid"]:
gruplist = cl.getAllContactIds()
kontak = cl.getContacts(gruplist)
num=1
msgs="═════════List FriendMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════List FriendMid═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["เช็คบล็อค"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["รายชื่อกลุ่ม"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["เช็คไอดีกลุ่ม"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Grupimage: " in msg.text:
saya = msg.text.replace('Grupimage: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "ชื่อกลุ่ม" in msg.text.lower():
saya = msg.text.lower().replace('ชื่อกลุ่ม','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ชื่อกลุ่ม : ]\n" + gid.name)
elif "ไอดีกลุ่ม" in msg.text:
saya = msg.text.replace('ไอดีกลุ่ม','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ไอดีกลุ่ม : ]\n" + gid.id)
elif "Grupinfo: " in msg.text:
saya = msg.text.replace('Grupinfo: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Glist"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif "กันรัน" in msg.text:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"คำเชิญเข้ากลุ่มจะถูกปฏิเสธคำเชิญทั้งหมด")
else:
cl.sendText(msg.to,"เปิดปฏิเสธคำเชิญทั้งหมดอยู่แล้ว")
elif msg.text in ["ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว")
elif msg.text in ["Delete chat","ล้างแชท"]:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"สำเร็จ..Delete Chat")
cl.sendText(msg.to,"Success...")
elif "ลบแชท" in msg.text:
try:
cl.removeAllMessages(op.param2)
print "[Command] Remove Chat"
cl.sendText(msg.to,"Done")
except Exception as error:
print error
cl.sendText(msg.to,"Error")
elif msg.text.lower() == 'gcancel':
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku menolak semua undangan")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "แอดทั้งห้อง" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"แอดทุกคนในห้องนี้แล้วคับ")
#==============================================================================#
elif "แจ๊ะ" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "แทคสมาชิกแล้วจำนวน:\n" + str(jml) + " คน"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["Setview","จับ","Cctv"]:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "☆Checkpoint Checked☆")
print "Setview"
elif msg.text in ["Viewseen","Check","อ่าน","Cyduk"]:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
conName.append('nones')
pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "╔═════════════════════════\n║ ☆☞ LIST VIEWERS ☜☆\n╠═════════════════════════\n╠➩"
grp = '\n╠➩ '.join(str(f) for f in dataResult)
total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════"
cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "☆Auto Checkpoint☆")
else:
cl.sendText(msg.to, "☆Belum Ada Viewers☆")
print "Viewseen"
elif "ประกาศกลุ่ม: " in msg.text:
bc = msg.text.replace("ประกาศกลุ่ม: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i,"======[ข้อความประกาศกลุ่ม]======\n\n"+bc+"\n\nBy: RED SAMURI SELFBOT!!")
elif "ประกาศแชท: " in msg.text:
bc = msg.text.replace("ประกาศแชท: ","")
gid = cl.getAllContactIds()
for i in gid:
cl.sendText(i,"======[ข้อความประกาศแชท]======\n\n"+bc+"\n\nBy: RED SAMURI SELFBOT!!")
elif "ส่งรูปภาพตามกลุ่ม: " in msg.text:
bc = msg.text.replace("ส่งรูปภาพตามกลุ่ม: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendImageWithURL(i, bc)
elif "ส่งรูปภามตามแชท: " in msg.text:
bc = msg.text.replace("ส่งรูปภาพตามแชท: ","")
gid = cl.getAllContactIds()
for i in gid:
cl.sendImageWithURL(i, bc)
elif "Spam change: " in msg.text:
wait["spam"] = msg.text.replace("Spam change: ","")
cl.sendText(msg.to,"spam changed")
elif "Spam add: " in msg.text:
wait["spam"] = msg.text.replace("Spam add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam: " in msg.text:
strnum = msg.text.replace("Spam: ","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["commen1"])
elif "Halo @" in msg.text:
_name = msg.text.replace("Halo @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
else:
pass
elif "Spam" in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif "รันคทไวรัส @" in msg.text:
_name = msg.text.replace("รันคทไวรัส @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 13
msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53',"}
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(g.mid,msg)
cl.sendText(msg.to, "Done")
print " Spammed !"
elif "say " in msg.text:
say = msg.text.replace("say ","")
lang = 'th'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["ส่งของขวัญ"]:
wait["gift"] = True
cl.sendText(msg.to,"ส่งคทมาเลยคับ")
elif msg.text in ["ก๊อปคท"]:
wait2["copy"] = True
cl.sendText(msg.to,"ส่งคทมาเลยคับ")
elif msg.text in ["รันของขวัญ"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
elif msg.text in ["เชิญ:@"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif msg.text in ["ออกทุกกลุ่ม"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
vipro.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nผู้สร้างสั่งให้ผมออก..(๑•́ ₃ •̀๑)\nติดต่อผู้สร้างได้ที่...ไอดี samuri5..เด้อ〒_〒")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "tak" in msg.text:
if msg.from_ in creator + admin:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif msg.text in ["kick:@"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.kickoutFromGroup(msg.to,[gCreator])
print "success kick gCreator"
except:
pass
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif "/Sendpm " in msg.text:
bctxt = msg.text.replace("/Sendpm ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia, (bctxt))
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
# elif msg.text.lower() in dangerMessage:
# if msg.toType == 2:
# try:
# cl.kickoutFromGroup(msg.to,[msg.from_])
# except:
# cl.kickoutFromGroup(msg.to,[msg.from_])
# cl.sendText(msg.to, "Hati-Hati bicara ya kak....!!!")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithURL(msg.to,wait["pap"])
#==============================================================================#
elif 'รูป ' in msg.text:
googl = msg.text.replace('รูป ',"")
url = 'https://www.google.com/search?hl=en&biw=1366&bih=659&tbm=isch&sa=1&ei=vSD9WYimHMWHvQTg_53IDw&q=' + googl
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
try:
start = timeit.timeit()
cl.sendImageWithURL(msg.to,path)
cl.sendText(msg.to, "Google Image \nType: Search Image\nWaktu dicari: %s" % (start) +"\nTotal Image Links = "+str(len(items)))
print "[Notif] Search Image Google Sucess"
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'ไอดี':
cl.sendText(msg.to,mid)
elif "โพส: " in msg.text:
tl_text = msg.text.replace("โพส: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "เปลี่ยนชื่อ: " in msg.text:
string = msg.text.replace("เปลี่ยนชื่อ: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"เปลี่ยนชื่อของคุณแล้วดังนี้👇 " + "\n" + string + "")
elif "เปลี่ยนตัส: " in msg.text:
string = msg.text.replace("เปลี่ยนตัส: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"เปลี่ยนตัสของคุณแล้วดังนี้ " + string + "")
elif msg.text in ["ชื่อ"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["ตัส"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["รูปโปร"]:
h = cl.getContact(mid)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["วีดีโอโปร"]:
h = cl.getContact(mid)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["ลิ้งรูปโปร"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["รูปปก"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif msg.text in ["ลิ้งรูปปก"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
#======================================================================#
elif "เปิดสแกน" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
cl.sendText(msg.to,"เปิดระบบสแกนคนอ่านอัตโนมัติ")
elif "ปิดสแกน" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
cl.sendText(msg.to, "ปิดระบบสแกนคนอ่านอัตโนมัติแล้ว")
else:
cl.sendText(msg.to, "โปรดใช้คำสั่งเปิดแสกนก่อนจะปิด")
#============================================================================#
elif "idline: " in msg.text:
msgg = msg.text.replace('idline: ','')
conn = cl.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
cl.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
cl.sendMessage(msg)
elif "ไอดี @" in msg.text:
_name = msg.text.replace("ไอดี @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "ข้อมูล " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "ตัส " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "ชื่อ " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "รูปปก " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "คท " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "รูปโปร @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("รูปโปร @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "วีดีโอโปร @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("วีดีโอโปร @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "ลิ้งรูปปก @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("ลิ้งรูปปก @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "รูปกลุ่ม" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "ลิ้งรูปกลุ่ม" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendText(msg.to,path)
elif "Clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
hun = cl.getProfile()
hun.pictureStatus = P
cl.updateProfile(hun)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif msg.text in ["คืนร่าง"]:
try:
cl.updateProfile.pictureStatus(backup.pictureStatus)
cl.updateProfile.statusMessage(backup.statusMessage)
cl.updateProfile.displayName(backup.displayName)
cl.sendText(msg.to, "กลับร่างเดิมแล้ว")
except Exception as e:
cl.sendText(msg.to, str (e))
elif "Cp @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Cp @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "ไม่สำเร็จ")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "ก็อพปี้เรียบร้อย")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "กลับร่างเดิมแล้ว")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["Backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "กลับร่างเดิมแล้ว")
except Exception as e:
cl.sendText(msg.to, str (e))
elif 'copy ' in msg.text.lower():
if msg.from_ in creator + admin:
if msg.toType == 2:
red = re.compile(re.escape('copy '),re.IGNORECASE)
tname = red.sub('',msg.text)
tname = tname.lstrip()
tname = tname.replace(" @","$spliter$")
tname = tname.rstrip()
tname = tname.split("$spliter$")
tname = tname[0]
tname = tname[1:]
clist = {
"Founded":False,
"displayName":"",
"statusMessage":"",
"pictureStatus":""
}
mems = cl.getGroup(msg.to).members
for targ in mems:
if targ.displayName == tname:
clist["displayName"] = targ.displayName
clist["statusMessage"] = targ.statusMessage
clist["pictureStatus"] = targ.pictureStatus
clist["Founded"] = True
if clist["Founded"]:
wait["selfStatus"] = False
me = cl.getProfile()
me.displayName = clist["displayName"]
me.statusMessage = clist["statusMessage"]
me.pictureStatus = clist["pictureStatus"]
cl.updateDisplayPicture(me.pictureStatus)
cl.updateProfile(me)
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Done")
elif msg.text in ["Recopy"]:
if msg.from_ in creator + admin:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Success")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text == "Clone":
if msg.toType == 0:
targ = cl.getContact(msg.to)
me = cl.getProfile()
me.displayName = targ.displayName
me.statusMessage = targ.statusMessage
me.pictureStatus = targ.pictureStatus
cl.updateDisplayPicture(me.pictureStatus)
cl.updateProfile(me)
cl.sendText(msg.to,"สำเร็จแล้ว")
else:
cl.sendText(msg.to,"คำสั่งนี้ใช้ได้เฉพาะในแชทส่วนตัวเท่านั้น")
#==============================================================================#
        elif "/fancytext: " in msg.text:
            # Wrap the given text in special Unicode tag byte sequences so the
            # LINE client renders it with a "fancy" effect.
            txt = msg.text.replace("/fancytext: ", "")
            t1 = "\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xa0\x81\xf4\x80\xa0\x81\xf4\x80\xa0\x81"
            t2 = "\xf4\x80\x82\xb3\xf4\x8f\xbf\xbf"
            cl.sendText(msg.to, t1 + txt + t2)
elif "อู้-id " in msg.text:
isi = msg.text.replace("อู้-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "อู้-en " in msg.text:
isi = msg.text.replace("อู้-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "อู้-ar" in msg.text:
isi = msg.text.replace("อู้-ar ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "อู้-jp" in msg.text:
isi = msg.text.replace("อู้-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "อู้-ko" in msg.text:
isi = msg.text.replace("อู้-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Th@en" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'en'
kata = msg.text.replace("Th@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM TH🍁\n" + "" + kata + "\n🍁TO ENGLISH🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "En@th" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'th'
kata = msg.text.replace("En@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM EN🍁\n" + "" + kata + "\n🍁TO TH🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Th@jp" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Th@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM TH🍁\n" + "" + kata + "\n🍁TO JP🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Jp@th" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'th'
kata = msg.text.replace("Jp@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM JP🍁\n" + "" + kata + "\n🍁TO TH🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM ID🍁\n" + "" + kata + "\n🍁TO TH🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM TH🍁\n" + "" + kata + "\n🍁TO ID🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Th@ar" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Th@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM TH🍁\n" + "" + kata + "\n🍁TO AR🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Ar@th" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'th'
kata = msg.text.replace("Ar@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM AR🍁\n" + "" + kata + "\n🍁TO TH🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Th@ko" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Th@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM TH🍁\n" + "" + kata + "\n🍁TO KO🍁\n" + "" + result + "\n🍁SUKSES🍁")
elif "Ko@th" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'th'
kata = msg.text.replace("Ko@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"🍁FROM KO🍁\n" + "" + kata + "\n🍁TO TH🍁\n" + "" + result + "\n🍁SUKSES🍁")
        elif msg.text.lower() == 'welcome':
            # Greet newcomers: group-name text banner plus a Thai TTS clip.
            ginfo = cl.getGroup(msg.to)
            cl.sendText(msg.to,"🙏สวัสดีคับคนมาใหม่ 🙏" + "\n🌾ยินดีต้อนรับเข้าสู่กลุ่ม 🌾" + "\n👉" + str(ginfo.name) + "👈" + "\nมาใหม่แก้ผ้าด้วยนะ😂😂")
            cl.sendText(msg.to,"By: •─✯RED★SAMURI★SELFBOT✯─•")
            jawaban1 = ("ยินดีที่ได้รู้จักนะครับ " + "ผมชื่อเรด นะ")
            tts = gTTS(text=jawaban1, lang='th')
            tts.save('tts.mp3')
            cl.sendAudio(msg.to,'tts.mp3')
        elif msg.text.lower() == 'แนะนำตัว':
            # Self-introduction variant of the welcome message.
            ginfo = cl.getGroup(msg.to)
            cl.sendText(msg.to,"🙏สวัสดีคับทุกคน 🙏" + "\n🌾ยินดีที่ได้เข้ามาในกลุ่ม 🌾" + "\n👉" + str(ginfo.name) +"👈")
            cl.sendText(msg.to," สวัสดีแอดด้วยนะ" + "\nมาใหม่ต้องแก้ผ้าด้วยรึเปล่า 😆😆" + "\n\nBy: •─✯RED★SAMURI★SELFBOT✯─•")
            jawaban1 = ("ผมชื่อเรดนะ" + "ยินดีที่ได้รู้จักกับทุกคนครับ")
            tts = gTTS(text=jawaban1, lang='th')
            tts.save('tts.mp3')
            cl.sendAudio(msg.to,'tts.mp3')
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
say = msg.text.replace("Say-ar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
say = msg.text.replace("Say-ko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-Th " in msg.text:
say = msg.text.replace("Say-Th ","")
lang = 'Th'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
        elif "แซว" in msg.text:
            # Reply with a random teasing phrase as Thai TTS audio.
            # NOTE(review): `tanya` is computed but never used.
            tanya = msg.text.replace("แซว","")
            jawab = ("สอ บอ มอ ยอ หอ","ว่าไงน้องสาว","ใครโสดขอมือหน่อย","ตับ ตับตับ ตับตับ")
            jawaban = random.choice(jawab)
            tts = gTTS(text=jawaban, lang='th')
            tts.save('tts.mp3')
            cl.sendAudio(msg.to,'tts.mp3')
        # --- Search-link builders: reply with a URL for various services ---
        elif "github " in msg.text:
            # GitHub search link for the given keyword.
            a = msg.text.replace("github ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"เริ่มต้นค้นหา ...")
            cl.sendText(msg.to, "Title: " + a + "\nLink: https://github.com/search?q=" +b)
            cl.sendText(msg.to, "☝กดลิ้งเข้าไปหาเองเด้อ🔬👌🔭")
        elif "เพลสโต " in msg.text:
            # Google Play search link.  NOTE(review): unlike the other
            # branches, the query is not URL-quoted here.
            tob = msg.text.replace("เพลสโต ","")
            cl.sendText(msg.to,"กำลังค้นหาชื่อแอพ...")
            cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
            cl.sendText(msg.to,"☝กดลิ้งเข้าไปโหลดได้เลยนะ ^ - ^")
        elif "twitter " in msg.text:
            # Twitter search link.
            a = msg.text.replace("twitter ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"เริ่มต้นทำการค้นหา ...")
            cl.sendText(msg.to, "https://www.twitter.com/search?q=" + b)
            cl.sendText(msg.to,"ทำการค้นหาสำเร็จ เชิญเข้าไปส่องโลด😆😆")
        elif "smule " in msg.text:
            # Smule profile search link.
            a = msg.text.replace("smule ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"กำลังเริ่มต้นค้นหา ...")
            cl.sendText(msg.to, "Nama: "+b+"\nId smule: http://smule.com/search?q=" +b)
        elif "ไอจี " in msg.text:
            # Direct Instagram profile link.
            a = msg.text.replace("ไอจี ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"กำลังเริ่มต้นค้นหา ...")
            cl.sendText(msg.to, "https://www.instagram.com/"+b+"?hl=th")
            cl.sendText(msg.to,"ทำการค้นหาสำเร็จ เชิญเข้าไปส่องโลด😆😆")
        elif "เฟสบุค" in msg.text:
            # Facebook link.  NOTE(review): no "/" between host and the
            # quoted text — probably should be ".com/" + b; left as-is.
            a = msg.text.replace("เฟสบุค","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"กำลังเริ่มต้นค้นหา ...")
            cl.sendText(msg.to, "https://www.facebook.com" + b)
            cl.sendText(msg.to," ทำการค้นหาสำเร็จ ")
        elif "ส่องเฟส " in msg.text:
            # Facebook top-results search link.
            a = msg.text.replace("ส่องเฟส ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"กำลังเริ่มต้นค้นหา ...")
            cl.sendText(msg.to, "https://www.facebook.com/search/top/?q=" + b)
            cl.sendText(msg.to,"ทำการค้นหาสำเร็จ เชิญเข้าไปส่องโลด😆😆")
        elif "กูเกิ้ล " in msg.text:
            # Google (Thailand) search link.
            a = msg.text.replace("กูเกิ้ล ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"โปรดรอสักครู่...")
            cl.sendText(msg.to, "https://www.google.co.th/search?q=" + b)
            cl.sendText(msg.to,"ทำการค้นหาสำเร็จ↖(^ω^)↗")
        elif "ยูทูป " in msg.text:
            # YouTube search link.
            a = msg.text.replace("ยูทูป ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"โปรดรอสักครู่...")
            cl.sendText(msg.to, "https://www.youtube.com/results?search_query=" + b)
            cl.sendText(msg.to,"ทำการค้นหาสำเร็จ↖(^ω^)↗")
        elif 'wikipedia ' in msg.text:
            # Thai-Wikipedia lookup: send title, 3-sentence summary and URL;
            # on any failure fall back to just the article URL.
            try:
                wiki = msg.text.replace("wikipedia ","")
                wikipedia.set_lang("th")
                pesan="Title ("
                pesan+=wikipedia.page(wiki).title
                pesan+=")\n\n"
                pesan+=wikipedia.summary(wiki, sentences=3)
                pesan+="\n"
                pesan+=wikipedia.page(wiki).url
                cl.sendText(msg.to, pesan)
            except:
                try:
                    pesan="ข้อความยาวเกินไปโปรดกดที่ลิ้งเพื่อดูข้อมูล\n"
                    pesan+=wikipedia.page(wiki).url
                    cl.sendText(msg.to, pesan)
                except Exception as e:
                    cl.sendText(msg.to, str(e))
        elif "video " in msg.text:
            # Build an adult-site search link for the given keyword.
            a = msg.text.replace("video ","")
            b = urllib.quote(a)
            cl.sendText(msg.to,"โปรดรอสักครู่...")
            cl.sendText(msg.to, "{ Xvideos search page }\n\nTitle: "+b+"\nSource : https://porngangs.com/?tag=" +b)
        elif "Youtube " in msg.text:
            # Scrape YouTube search results and list every video title + URL.
            query = msg.text.replace("Youtube ","")
            with requests.session() as s:
                s.headers['user-agent'] = 'Mozilla/5.0'
                url = 'http://www.youtube.com/results'
                params = {'search_query': query}
                r = s.get(url, params=params)
                soup = BeautifulSoup(r.content, 'html5lib')
                hasil = ""
                for a in soup.select('.yt-lockup-title > a[title]'):
                    # Skip playlist links; keep plain video results only.
                    if '&list=' not in a['href']:
                        hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
                cl.sendText(msg.to,hasil)
                print '[Command] Youtube Search'
elif "mp4 " in msg.text:
a = msg.text.replace("mp4 ", "").strip()
query = urllib.quote(a)
url = "https://www.youtube.com/results?search_query=mp4" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
cl.sendVideoWithUrl(msg.to, url)
        elif 'Music ' in msg.text:
            # Send the URL of the first YouTube search result for the query.
            try:
                textToSearch = (msg.text).replace('Music ', "").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
            except:
                cl.sendText(msg.to,"Could not find it")
        elif 'ขอเพลง ' in msg.text:
            # Thai alias of the "Music" command above.
            try:
                textToSearch = (msg.text).replace('ขอเพลง ', "").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
            except:
                cl.sendText(msg.to,"Could not find it")
        elif 'วีดีโอ ' in msg.text:
            # Same first-hit lookup, but prefixes the query with "หนัง"
            # (movie) to bias the search.
            try:
                textToSearch = (msg.text).replace('วีดีโอ ', "").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=หนัง" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
            except:
                cl.sendText(msg.to,"Could not find it")
elif "เช็คไอจี " in msg.text:
try:
instagram = msg.text.replace("เช็คไอจี ","")
response = requests.get("https://www.instagram.com/"+instagram+"?hl=th")
data = response.read()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendText(msg.to, str(text))
cl.sendImageWithURL(msg.to, profileIG)
except Exception as e:
cl.sendText(msg.to, str(e))
        elif "/postig" in msg.text:
            # Download every post of an Instagram profile: videos resolved to
            # their mp4 URL, photos sent as images.  Pages are walked via the
            # "end_cursor" token embedded in the page's window._sharedData
            # JSON.  NOTE(review): relies on Instagram's old page markup;
            # the regexes will raise AttributeError if the format changed.
            separate = msg.text.split(" ")
            user = msg.text.replace(separate[0] + " ","")
            if user.startswith("@"):
                user = user.replace("@","")
            profile = "https://www.instagram.com/" + user
            with requests.session() as x:
                x.headers['user-agent'] = 'Mozilla/5.0'
                end_cursor = ''
                for count in range(1, 999):
                    print('PAGE: ', count)
                    r = x.get(profile, params={'max_id': end_cursor})
                    data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
                    j = json.loads(data)
                    for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
                        if node['is_video']:
                            # Fetch the post page to extract the raw mp4 URL.
                            page = 'https://www.instagram.com/p/' + node['code']
                            r = x.get(page)
                            url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
                            print(url)
                            cl.sendVideoWithURL(msg.to,url)
                        else:
                            print (node['display_src'])
                            cl.sendImageWithURL(msg.to,node['display_src'])
                    end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif msg.text.lower() == 'time':
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bulan = blan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
        elif "Checkdate " in msg.text:
            # Query a Google Apps Script service that returns birth-date
            # facts (birth date, age, next birthday, zodiac) as JSON.
            tanggal = msg.text.replace("Checkdate ","")
            r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
            data=r.text
            data=json.loads(data)
            lahir = data["data"]["lahir"]
            usia = data["data"]["usia"]
            ultah = data["data"]["ultah"]
            zodiak = data["data"]["zodiak"]
            cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
        elif msg.text.lower() == 'ปฏิทิน':
            # Send today's date/time; also cached in wait2['setTime'] keyed
            # by chat id.
            wait2['setTime'][msg.to] = datetime.today().strftime('วันเดือนปี : %Y-%m-%d \nDay : %A \nเวลา : %H:%M:%S')
            cl.sendText(msg.to, "🍁ปฏิทิน👉REDSAMURI SELFBØT🍁\n\n" + (wait2['setTime'][msg.to]))
#==============================================================================#
        # --- Host/server inspection commands (shell out via subprocess) ---
        elif msg.text.lower() == 'ifconfig':
            # Network interfaces (`ifconfig`).
            botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
            cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
        elif msg.text.lower() == 'system':
            # Disk usage (`df -h`).
            botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
            cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
        elif msg.text.lower() == 'kernel':
            # Kernel/OS info (`uname -srvmpio`).
            botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
            cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
        elif msg.text.lower() == 'cpu':
            # CPU details from /proc/cpuinfo.
            botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
            cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
        elif msg.text.lower() == 'reboot':
            # Restart the bot process (restart_program defined elsewhere in
            # the file).
            print "[Command]Restart"
            try:
                cl.sendText(msg.to,"Restarting...")
                cl.sendText(msg.to,"Restart Success")
                restart_program()
            except:
                cl.sendText(msg.to,"Please wait")
                restart_program()
                pass
elif "Turn off" in msg.text:
try:
import sys
sys.exit()
except:
pass
        elif msg.text.lower() == 'runtime':
            # Report elapsed uptime since `mulai` (timestamp set at startup);
            # `waktu` (defined elsewhere) formats the seconds.
            cl.sendText(msg.to,"「Please wait..」\nType :Loading...\nStatus : Loading...")
            eltime = time.time() - mulai
            van = "Type : Bot Sedang Berjalan \nStatus : Aktif \nMySelbot sudah berjalan selama"+waktu(eltime)
            cl.sendText(msg.to,van)
#==============================================================================#
elif "รันโลด" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
cl.createGroup("RED SAMURI SELFBOT", mi_d)
cl.sendText(msg.to,"RED SAMURI")
        elif "รัน @" in msg.text:
            # Spam-create groups "at" the mentioned member.
            print "[Command]covergroup"
            _name = msg.text.replace("รัน @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            targets = []
            for g in gs.members:
                if _nametarget == g.displayName:
                    targets.append(g.mid)
            if targets == []:
                cl.sendText(msg.to,"Contact not found")
            else:
                for target in targets:
                    try:
                        thisgroup = cl.getGroups([msg.to])
                        # NOTE(review): this builds a list of the *target*
                        # mid repeated once per member — probably meant
                        # [contact.mid for contact in ...]; left as-is.
                        Mids = [target for contact in thisgroup[0].members]
                        mi_d = Mids[:33]
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.createGroup("RED SAMURI Group",mi_d)
                        cl.sendText(msg.to,"🏂⛷️[จะออกไปแตะขอบฟ้า]")
                        cl.sendText(msg.to,"เรียบร้อย")
                    except:
                        pass
            print "[Command]covergroup"
elif "รันแชท @" in msg.text:
_name = msg.text.replace("รันแชท @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(g.mid,"RED SAMURI")
cl.sendText(msg.to, "Done")
print " Spammed !"
elif "รัน: " in msg.text:
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
contact = cl.getContact(key)
cl.createGroup(msg.to,"RED SAMURI Group",contact)
cl.sendText(msg,to,"┌∩┐(◣_◢)┌∩┐")
#=================================================================================#
        elif "ของขวัญ @" in msg.text:
            # Send a theme "gift" message to the mentioned member.
            _name = msg.text.replace("ของขวัญ @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            for g in gs.members:
                if _nametarget == g.displayName:
                    msg.contentType = 9
                    msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
                                            'PRDTYPE': 'THEME',
                                            'MSGTPL': '10'}
                    msg.text = None
                    # NOTE(review): cl.sendMessage is called with two
                    # arguments here, unlike the single-argument calls
                    # elsewhere in the file — verify against the client API.
                    cl.sendMessage(msg,g)
                    cl.sendText(msg.to, "🌸ตรวจสอบของขวัญได้ที่แชทนะจ๊ะ🌸...😘😘")
        elif "ส่งของขวัญ " in msg.text:
            # Send a sticker "gift" to every member whose display name
            # contains the mentioned text.
            msg.contentType = 13
            nk0 = msg.text.replace("ส่งของขวัญ ","")
            nk1 = nk0.lstrip()
            nk2 = nk1.replace("@","")
            nk3 = nk2.rstrip()
            _name = nk3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                # NOTE(review): bare `sendMessage` is undefined in this scope
                # (other branches use cl.sendText) — likely a broken path.
                sendMessage(msg.to,"user does not exist")
                pass
            else:
                for target in targets:
                    try:
                        cl.sendText(msg.to,_name + "🌸ตรวจสอบของขวัญได้ที่แชทนะจ๊ะ🌸...😘😘")
                        msg.contentType = 9
                        msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                                'STKVER': '1',
                                                'MSGTPL': '1',
                                                'STKPKGID': '1380280'}
                        msg.to = target
                        msg.text = None
                        cl.sendMessage(msg)
                    except:
                        msg.contentMetadata = {'mid': target}
elif "ของขวัญ2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("ของขวัญ1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " 🌸ตรวจสอบของขวัญได้ที่แชทนะจ๊ะ🌸...😘😘")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "ของขวัญ3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("ของขวัญ2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " 🌸ตรวจสอบของขวัญได้ที่แชทนะจ๊ะ🌸...😘😘")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
        elif "ของขวัญ4 " in msg.text:
            # Send sticker gift #4 to members whose name contains the target.
            msg.contentType = 13
            nk0 = msg.text.replace("ของขวัญ4 ","")
            nk1 = nk0.lstrip()
            nk2 = nk1.replace("@","")
            nk3 = nk2.rstrip()
            _name = nk3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                # NOTE(review): bare `sendMessage` is undefined in this scope
                # (other branches use cl.sendText) — likely a broken path.
                sendMessage(msg.to,"user does not exist")
                pass
            else:
                for target in targets:
                    try:
                        cl.sendText(msg.to,_name + " 🌸ตรวจสอบของขวัญได้ที่แชทนะจ๊ะ🌸...😘😘")
                        msg.contentType = 9
                        msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                                'STKVER': '1',
                                                'MSGTPL': '3',
                                                'STKPKGID': '1395389'}
                        msg.to = target
                        msg.text = None
                        cl.sendMessage(msg)
                    except:
                        msg.contentMetadata = {'mid': target}
elif msg.text in ["แสกนดำ"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user\nมีบัญชีดำของคุณอยู่กลุ่มนี้")
xname = ""
for mi_d in wait["blacklist"]:
xname = cl.getContact(mi_d).displayName + ""
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mm)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif "แบนกลุ่ม: " in msg.text:
grp = msg.text.replace("แบนกลุ่ม: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = red.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Khusus red")
        elif msg.text in ["กลุ่มติดดำ","List ban group"]:
            # List the names of all groups flagged in wait["BlGroup"].
            if msg.from_ in admin:
                if wait["BlGroup"] == {}:
                    cl.sendText(msg.to,"Tidak Ada")
                else:
                    mc = ""
                    for gid in wait["BlGroup"]:
                        mc += "-> " +cl.getGroup(gid).name + "\n"
                    cl.sendText(msg.to,"===[Ban Group]===\n"+mc)
            else:
                cl.sendText(msg.to, "Khusus Admin")
elif msg.text in ["ล้างแบนกลุ่ม: "]:
if msg.from_ in admin:
ng = msg.text.replace("ล้างแบนกลุ่ม: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Khusus red")
elif "แบน @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("แบน @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendText(msg.to,_nametarget + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "ล้างแบน @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("ล้างแบน @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
del wait["blacklist"][target]
cl.sendText(msg.to,_nametarget + " Delete From Blacklist")
except:
cl.sendText(msg.to,_nametarget + " Not In Blacklist")
elif msg.text in ["แบน"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["ล้างแบน"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif "แบน:" in msg.text:
nk0 = msg.text.replace("แบน:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "ล้างแบน:" in msg.text:
nk0 = msg.text.replace("ล้างแบน:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Delete From Blacklist")
except:
cl.sendText(msg.to,_name + " Not In Blacklist")
elif msg.text in ["เครีย์แบน"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"Blacklist Telah Dibersihkan")
elif msg.text.lower() == '/ดำ':
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text.lower() == '/ขาว':
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["เช็คดำ"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"ไม่มีอะไรในบัญชีดำ")
else:
cl.sendText(msg.to,"รายชื่อสมาชิกในบัญชีดำ")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == 'ไล่ดำ':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "#Banall" in msg.text:
nk0 = msg.text.replace("#Banall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "#Unbanall" in msg.text:
nk0 = msg.text.replace("#Unbanall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["ดับไฟ"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53',"}
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
elif "Leave all" == msg.text:
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
cl.sendText(i,"ลาก่อย!")
cl.leaveGroup(i)
cl.sendText(msg.to,"Success Leave All Group")
else:
cl.sendText(msg.to,"คุณไม่มีสิทย์ใช้คำสั่งนี้")
elif msg.text in ["ออก"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
elif msg.text in ["Acc invite"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
if gid is not None:
cl.sendText(msg.to,"ยอมรับคำเชิญทั้งหมดจากกลุ่มแล้ว :\n" + _list)
else:
cl.sendText(msg.to,"ไม่มีกลุ่มที่รอดำเนินการในขณะนี้")
elif "@bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
#===============================================================================#
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
pass
if op.type == 19:
if not op.param2 in Bots:
try:
gs = cl.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[cl]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.1)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
cl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
try:
gs = cl.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 5:
if wait["autoBlock"] == True:
cl.blockContact(op.param1)
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
pass
pass
cl.sendText(op.param1,"Group Name Lock")
cl.sendText(op.param1,"Haddeuh dikunci Pe'a")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in admin:
cl.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"Maaf " + red.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = ck.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["BotCancel"] == True:
if op.param3 in Bots:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Blacklist Detected")
else:
pass
#==============================================================================#
#------------------------------------------------------------------------------#
#==============================================================================#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nรู้นะว่าอ่านอยู่. . .\nออกมาคุยเดี๋ยวนี้ (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nนี่ก็อีกคน. . .อ่านอย่างเดียวเลย\nไม่ออกมาคุยล่ะ (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nแอบกันจังเลยนะ???\nคิดว่าเป็นนินจารึไง...??😆😆 ")
time.sleep(0.2)
summon(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
    """Background worker: auto-like and auto-comment on unliked timeline posts.

    Polls ``cl.activity(1)`` forever. Any error bumps a failure counter and
    the whole process exits once the counter reaches 50.
    """
    failures = 1
    while True:
        try:
            for post in cl.activity(1)["result"]["posts"]:
                # Skip anything already liked; only untouched posts are handled.
                if post["postInfo"]["liked"] is not False:
                    continue
                writer = post["userInfo"]["writerMid"]
                post_id = post["postInfo"]["postId"]
                if wait["likeOn"] == True:
                    cl.like(writer, post_id, 1001)
                    print("Like")
                if wait["commentOn"] == True:
                    # Never comment on posts by blacklisted writers.
                    if writer not in wait["commentBlack"]:
                        cl.comment(writer, post_id, wait["comment"])
        except:
            failures += 1
            if failures == 50:
                sys.exit(0)
# Launch the auto-like poller in a background thread; daemon=True means this
# thread will not keep the process alive once the main poll loop below exits.
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
    """Like (type 1001) every not-yet-liked post among the latest timeline posts.

    Fix: the old code called ``cl.activity(limit=200)`` once per loop
    iteration (200 network round-trips for one pass) and indexed posts by a
    hard-coded ``range(0, 200)``, which raised IndexError when the feed had
    fewer than 200 entries. The feed is now fetched once and only the posts
    actually returned are visited.
    """
    hasil = cl.activity(limit=200)  # single fetch instead of one per post
    for post in hasil['result']['posts'][:200]:
        if post['postInfo']['liked'] == False:
            try:
                cl.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                print("Like")
            except:
                # Best-effort: a failed like should not stop the sweep.
                pass
        else:
            print("Already Liked")
    time.sleep(0.60)
def likeme():
    """Like (type 1002) the bot's own not-yet-liked posts in the timeline.

    Fix: the old code re-fetched the whole 200-post activity feed once per
    iteration and indexed it with a hard-coded ``range(0, 200)`` (IndexError
    on short feeds). The feed is now fetched once.
    """
    hasil = cl.activity(limit=200)  # single fetch instead of one per post
    for post in hasil['result']['posts'][:200]:
        if post['postInfo']['liked'] == False:
            # Only self-posts: the writer mid must match the bot's own mid.
            if post['userInfo']['mid'] in mid:
                try:
                    cl.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    print("Like")
                except:
                    pass
        else:
            print("Status Sudah di Like")
# Main long-poll loop: fetch up to 50 pending operations from the server and
# dispatch each one to bot(). Runs until the process is killed.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 50)
    except EOFError:
        # fetchOps raising EOFError indicates the stored revision is stale/invalid.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the local revision pointer so handled ops are not re-fetched.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
| 46.40875 | 446 | 0.415117 |
e0d385ada0af96855016efa55f0e1e9cbf009856
| 1,290 |
py
|
Python
|
FUNDASTORE/APPS/PRINCIPAL/views.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRINCIPAL/views.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRINCIPAL/views.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
from django.shortcuts import render
from django.core.mail import send_mail
from .forms import *
# Create your views here.
def ver_contactenos(request):
    """Render the contact page.

    On POST, e-mail the enquiry to the store inbox and send an automatic
    acknowledgement back to the address entered in the form. On GET, just
    render an empty form.

    Fix: removed the dead ``else`` branch that re-instantiated the form —
    ``formulario`` is already bound to a fresh form before the ``if``.
    """
    formulario = FormularioContactenos()
    if request.method == "POST":
        formulario = FormularioContactenos(request.POST)
        # NOTE(review): field values are read without formulario.is_valid();
        # confirm the form cannot reach this view with missing fields.
        asunto = 'CONSULTA DE -' + str(formulario["nombre"].value())
        mensaje = 'INFORMACION DE CONTACTO\nNOMBRE: [NOM]\nEMAIL: [EMAIL]\nTELEFONO: [TEL]\n\nMENSAJE\n[MSJ]'
        mensaje = mensaje.replace("[NOM]", formulario["nombre"].value())
        mensaje = mensaje.replace("[EMAIL]", formulario["email"].value())
        mensaje = mensaje.replace("[TEL]", formulario["telefono"].value())
        mensaje = mensaje.replace("[MSJ]", formulario["mensaje"].value())
        destinatarios = ["[email protected]"]
        send_mail(asunto, mensaje, None, destinatarios, fail_silently=False)
        # Auto-reply to the sender's own address.
        asunto = 'SU CONSULTA HA SIDO RECIBIDA'
        mensaje = 'HEMOS RECIBIDO SU CONSULTA EN 3 DIAS TENDRA RESPUESTA'
        destinatarios = [str(formulario["email"].value())]
        send_mail(asunto, mensaje, None, destinatarios, fail_silently=False)
    contexto = {"formulario": formulario}
    return render(request, 'PRINCIPAL/contactenos.html', contexto)
| 49.615385 | 109 | 0.691473 |
1ce2741de8abf220456b5b125a9240795c9a788a
| 3,598 |
py
|
Python
|
tests/test_lueftungsanlage_addr.py
|
fgoettel/wgt
|
e093e2a003fa6c9d4c2082cebbc95701d7f9089d
|
[
"Unlicense"
] | null | null | null |
tests/test_lueftungsanlage_addr.py
|
fgoettel/wgt
|
e093e2a003fa6c9d4c2082cebbc95701d7f9089d
|
[
"Unlicense"
] | null | null | null |
tests/test_lueftungsanlage_addr.py
|
fgoettel/wgt
|
e093e2a003fa6c9d4c2082cebbc95701d7f9089d
|
[
"Unlicense"
] | 1 |
2022-01-29T12:01:47.000Z
|
2022-01-29T12:01:47.000Z
|
"""Tests for `wgt` addresses package."""
import pytest
from wgt import WGT
def test_addr():
    """Ensure that the addresses are in sync with documentation.

    Doc updated 31.03.2020
    """
    # pylint: disable=protected-access
    # Documented register address for each WGT "_addr_<suffix>" attribute.
    expected_addresses = {
        "betriebsart": 100,
        "luftstufe_manuell": 101,
        "luftstufe_aktuell": 102,
        "luftleistung_linear_manuell": 103,
        "luftstufe_ueberschreibung": 104,
        "luftstufe_zeitprogramm_basis": 110,
        "stosslueftung": 111,
        "stosslueftung_restlaufzeit": 112,
        "waermepumpe": 114,
        "nachheizregister": 116,
        "geblaese_zuluft": 117,
        "geblaese_abluft": 118,
        "erdwaermetauscher": 121,
        "bypass": 123,
        "aussenklappe": 131,
        "vorheizregister": 133,
        "luftstufe_zeitprogramm": 140,
        "luftstufe_sensoren": 141,
        "luftleistung_aktuell_zuluft": 142,
        "luftleistung_aktuell_abluft": 143,
        "drehzahl_aktuell_zuluft": 144,
        "drehzahl_aktuell_abluft": 145,
        "t1_nach_erdwaermetauscher": 200,
        "t2_nach_vorheizregister": 201,
        "t3_vor_nacherwaermung": 202,
        "t4_nach_nacherwaermung": 203,
        "t5_abluft": 204,
        "t6_waermetauscher": 205,
        "t7_verdampfer": 206,
        "t8_kondensator": 207,
        "t10_aussen": 209,
        "heizen_kuehlen": 230,
        "waermepumpe_heizen": 231,
        "waermepumpe_kuehlen": 232,
        "zusatzheizung_haus": 234,
        "druckwaechter": 242,
        "evu_sperre": 243,
        "tuer_offen": 244,
        "geraetefilter_verschmutzt": 245,
        "geraetefilter_vorgelagert_verschmutzt": 246,
        "niedertarif_abgeschaltet": 247,
        "versorgungsspannung_abgeschaltet": 248,
        "pressostat": 250,
        "evu_sperre_extern": 251,
        "heizmodul_testbetrieb": 252,
        "notbetrieb": 253,
        "zuluft_zu_kalt": 254,
        "geraetefilter_restlaufzeit": 265,
        "geraetefilter_vorgelagert_restlaufzeit": 263,
        "fehler": 240,
        "temperatur_raum1_ist": 360,
        "temperatur_raum1_soll": 400,
        "temperatur_raum1_basis": 420,
        "zusatzheizung_raum1_freigabe": 440,
        "zusatzheizung_raum1_aktiv": 460,
        "zeitprogramm_heizen_raum1": 500,
        "betriebsstunden_luefter_gesamt": 800,
        "betriebsstunden_luefter_stufe1": 801,
        "betriebsstunden_luefter_stufe2": 802,
        "betriebsstunden_luefter_stufe3": 803,
        "betriebsstunden_luefter_stufe4": 804,
        "betriebsstunden_waermepumpe_gesamt": 805,
        "betriebsstunden_waermepumpe_kuehlen": 806,
        "betriebsstunden_vorheizregister": 809,
        "betriebsstunden_zusatzheizung": 810,
        "betriebsstunden_erdwaermetauscher": 813,
    }
    for suffix, address in expected_addresses.items():
        assert getattr(WGT, "_addr_" + suffix) == address
# Allow running this test module directly instead of via the pytest CLI.
if __name__ == "__main__":
    pytest.main()
| 42.329412 | 66 | 0.770984 |
1c344aad8538ab1fa6efeddea18a32c48a3a3f05
| 8,335 |
py
|
Python
|
Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
# Canned API responses used as fixtures by the integration tests
# (presumably captured from a live Exabeam instance — verify against the API).

# Peer-group listing: flat list of group names (may contain empty strings).
RESPONSE_PEER_GROUPS = [
    "Marketing",
    "usa",
    "101",
    "Program Manager",
    "Channel Administrator",
    "Chief Marketing Officer",
    "",
    "Chief Strategy Officer",
    "CN=Andrew",
    "BitLockerUsersComputers"
]
# User-label listing: label identifiers.
RESPONSE_USER_LABELS = [
    "privileged_user",
    "service_account"
]
# Watchlist listing: each entry carries a category, display title and id.
RESPONSE_WATCHLISTS = [
    {
        "category": "UserLabels",
        "title": "Executive Users",
        "watchlistId": "1234"
    },
    {
        "category": "UserLabels",
        "title": "Service Accounts",
        "watchlistId": "1111"
    },
    {
        "category": "Users",
        "title": "user watchlist",
        "watchlistId": "2222"
    },
    {
        "category": "PeerGroups",
        "title": "VP Operations",
        "watchlistId": "3333"
    }
]
# Single-asset lookup response; timestamps are epoch milliseconds.
RESPONSE_ASSET_DATA = {
    "asset": {
        "assetType": "Windows",
        "compromisedTime": 0,
        "firstSeen": 1530627660000,
        "hostName": "name",
        "ipAddress": "1.2.3.4",
        "lastSeen": 1538324597000
    }
}
# Session-info response; start/end times are epoch-millisecond strings here.
RESPONSE_SESSION_INFO = { 'sessionInfo': {
    "numOfAssets": 29,
    "riskScore": 0,
    "numOfAccounts": 1,
    "accounts": [],
    "zones": [],
    "endTime": "1591071360000",
    "numOfZones": 5,
    "startTime": "1591021860000",
    "loginHost": "lt-dummy-888",
    "sessionId": "dummy-20200601143100",
    "numOfReasons": 0,
    "label": "",
    "username": "dummy",
    "numOfSecurityEvents": 0,
    "numOfEvents": 62,
    "initialRiskScore": 0
    }
}
# Model-definition response for a single categorical model.
RESPONSE_MODEL_DATA = {
    "agingWindow": 32,
    "alpha": 0.8,
    "binWidth": None,
    "category": "Other",
    "convergenceFilter": "confidence_factor>=0.8",
    "cutOff": 5,
    "description": "Models which security groups users are being added to in the organization",
    "disabled": "FALSE",
    "feature": "group_name",
    "featureName": "group_name",
    "featureType": "group_name",
    "histogramEventTypes": "member-added",
    "iconName": None,
    "maxNumberOfBins": 1000000,
    "modelTemplate": "Account management, groups which users are being added to",
    "modelType": "CATEGORICAL",
    "name": "dummy",
    "scopeType": "ORG",
    "scopeValue": "org",
    "trainIf": "TRUE"
}
# Notable-assets response: one asset with its highest-risk sequence and the
# latest analyst comment attached.
RESPONSE_NOTABLE_ASSET_DATA = {
    'assets': [{
        'asset': {
            'hostName': 'host',
            'ipAddress': '1.1.1.1',
            'assetType': 'test',
            'firstSeen': 1591022160000,
            'lastSeen': 1593820320000
        },
        'highestRiskScore': 150,
        'highestRiskSequence': {
            'id': '1111',
            'entityName': 'asset',
            'entityValue': 'test',
            'day': 1593648000000,
            'triggeredRuleCountOpt': 15,
            'riskScoreOpt': 150.0
        },
        'latestAssetComment': {
            'commentId': 'test1111',
            'commentType': 'asset',
            'commentObjectId': 'test',
            'text': 'test',
            'exaUser': 'test',
            'exaUserFullname': '',
            'createTime': 1612275291188,
            'updateTime': 1612275291188,
            'edited': False
        }
    }]
}
# Notable-session response: two sessions plus per-user detail records and
# executive flags keyed by username.
RESPONSE_NOTABLE_SESSION_DETAILS = {
    'totalCount': 2, 'sessions': [
        {'sessionId': 'session1', 'username': 'username1', 'startTime': 1593704040000,
         'endTime': 1593727380000, 'initialRiskScore': 0, 'riskScore': 110, 'numOfReasons': 9,
         'loginHost': 'host1', 'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
         'zones': ['zone1', 'zone2'], 'numOfZones': 2, 'numOfAssets': 7, 'numOfEvents': 6,
         'numOfSecurityEvents': 0},
        {'sessionId': 'session2', 'username': 'username2', 'startTime': 1593682380000,
         'endTime': 1593727260000, 'initialRiskScore': 26, 'riskScore': 313, 'numOfReasons': 39, 'loginHost': 'host2',
         'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
         'zones': ['zone1', 'zone2', 'zone3', 'zone4'], 'numOfZones': 4,
         'numOfAssets': 17, 'numOfEvents': 30, 'numOfSecurityEvents': 1, 'riskTransferScore': 126.0}],
    'users': {
        'username2': {'username': 'username2', 'riskScore': 313.18, 'averageRiskScore': 171.41,
                      'pastScores': [287.19, 218.36, 0.0, 0.0, 0.0, 0.0, 0.0], 'lastSessionId': 'session2',
                      'firstSeen': 1591021500000, 'lastSeen': 1593820320000, 'lastActivityType': 'Account is active',
                      'lastActivityTime': 1593818940000,
                      'info': {'location': 'us',
                               'photo': '',
                               'phoneCell': '1234567890',
                               'email': '[email protected]',
                               'employeeType': 'employee', 'fullName': 'user username2',
                               'departmentNumber': '000',
                               'dn': 'test',
                               'country': 'usa', 'division': 'division',
                               'department': 'department',
                               'manager': 'test',
                               'phoneOffice': '1234567890',
                               'employeeNumber': '1234',
                               'title': 'title',
                               'group': 'test'},
                      'labels': [],
                      'pendingRiskTransfers': []},
        'mburgess': {'username': 'username1', 'riskScore': 109.73, 'averageRiskScore': 52.25,
                     'pastScores': [109.7382543963077], 'lastSessionId': 'session1',
                     'firstSeen': 1591025220000, 'lastSeen': 1593727380000, 'lastActivityType': 'Account is active',
                     'lastActivityTime': 1593704040000,
                     'info': {'location': 'us',
                              'photo': '',
                              'phoneCell': '1234567890',
                              'email': '[email protected]',
                              'employeeType': 'employee',
                              'fullName': 'user username1', 'departmentNumber': '000',
                              'dn': 'test',
                              'country': 'usa', 'division': 'division',
                              'department': 'department',
                              'manager': 'test',
                              'phoneOffice': '1234567890',
                              'employeeNumber': '1234',
                              'title': 'title',
                              'group': 'test'}, 'labels': [],
                     'pendingRiskTransfers': []}},
    'executiveUserFlags': {'username1': False, 'username2': False}
}
# Notable-sequence detail response for a single asset sequence.
RESPONSE_NOTABLE_SEQUENCE_DETAILS = [{
    'sequenceId': 'ID',
    'isWhitelisted': False,
    'areAllTriggeredRulesWhiteListed': False,
    'sequenceInfo': {
        'startTime': 1593648000000,
        'endTime': 1593734399999,
        'riskScore': 150,
        'numOfReasons': 8,
        'numOfEvents': 18,
        'numOfUsers': 4,
        'numOfSecurityEvents': 0,
        'numOfZones': 3,
        'numOfAssets': 8,
        'sequenceId': 'ID',
        'assetId': 'ID'},
    'hasBeenPartiallyWhiteListed': False
}]
# Per-event-type counts for a notable sequence.
RESPONSE_NOTABLE_SEQUENCE_EVENTS = [{
    'eventType': 'type1',
    'displayName': 'dn1',
    'count': 1},
    {'eventType': 'type2',
     'displayName': 'dn2',
     'count': 1},
    {'eventType': 'type3',
     'displayName': 'dn3',
     'count': 1},
    {'eventType': 'type4',
     'displayName': 'dn4',
     'count': 1},
    {'eventType': 'type5',
     'displayName': 'dn5',
     'count': 2},
    {'eventType': 'type6',
     'displayName': 'dn6',
     'count': 2},
    {'eventType': 'type7',
     'displayName': 'dn7',
     'count': 8},
    {'eventType': 'type8',
     'displayName': 'dn8',
     'count': 1},
    {'eventType': 'type9',
     'displayName': 'dn9',
     'count': 1}
]
# Record-deletion response: one removed record plus a change summary.
DELETE_RECORD_RESPONSE = {'sessionId': '56a5b19a-4193-4616-9978-0bbabb1e2d60',
                          'recordChanges': [{
                              'changeType': 'removed',
                              'changeId': '4aad5392-20e7-4423-abcb-a9680c566215',
                              'record': {'key': '', 'id': 'test_key'}
                          }],
                          'metadata': {'createdSize': 0, 'updatedSize': 0, 'removedSize': 1, 'duplicates': []}}
| 35.317797 | 118 | 0.486623 |
1c73dd669e3d5d8f2a77870c748f6bd2534d6e15
| 942 |
py
|
Python
|
数据结构/NowCode/29_VerifySquenceOfBST.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | null | null | null |
数据结构/NowCode/29_VerifySquenceOfBST.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 3 |
2020-08-14T07:50:27.000Z
|
2020-08-14T08:51:06.000Z
|
数据结构/NowCode/29_VerifySquenceOfBST.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 2 |
2021-03-14T05:58:45.000Z
|
2021-08-29T17:25:52.000Z
|
# Given an integer array, determine whether it could be the postorder
# traversal of some binary search tree. Print Yes/No semantics are handled
# by the judge; this method returns the boolean. All values are assumed
# distinct (per the problem statement).
class Solution:
    def VerifySquenceOfBST(self, sequence):
        """Return True if `sequence` is a valid BST postorder traversal.

        An empty input returns False, matching the original judge contract.

        Fixes versus the previous version:
        - no longer mutates the caller's list (the old code did
          ``del sequence[-1]`` on the argument in place);
        - when every element is smaller than the root, the old code left
          ``index`` as None so both ``sequence[:None]`` and
          ``sequence[None:]`` were the *entire* list, recursing over the
          same subtree twice; the split index is now always well defined.
        """
        if not sequence:
            return False
        return self._check(sequence)

    def _check(self, seq):
        # An empty subtree is trivially a valid BST.
        if not seq:
            return True
        # Postorder: the last element is the subtree root.
        root = seq[-1]
        # Left subtree = maximal prefix of values smaller than the root.
        split = 0
        while split < len(seq) - 1 and seq[split] < root:
            split += 1
        # Every remaining element (the right subtree) must exceed the root.
        for value in seq[split:len(seq) - 1]:
            if value < root:
                return False
        return (self._check(seq[:split])
                and self._check(seq[split:len(seq) - 1]))
c7add1482f438efd02b081cb25ef659e53ada2cb
| 1,301 |
py
|
Python
|
3kCTF/2021/web/pawnshop/apache/src/funcs.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
3kCTF/2021/web/pawnshop/apache/src/funcs.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
3kCTF/2021/web/pawnshop/apache/src/funcs.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import cgi, cgitb ,json
from elasticsearch import Elasticsearch
from email.utils import parseaddr
import json
import re
es_client = Elasticsearch(['http://172.30.0.7:9200'])
def api(jsonv):
    """Write *jsonv* to stdout as a CGI JSON response, then stop the script."""
    payload = json.dumps(jsonv)
    print("Content-type: application/json\r\n\r\n")
    print(payload)
    exit()
def verify_email(mail):
parsedEmail = parseaddr(mail)[1]
if parsedEmail == '' or parsedEmail != mail or not re.findall(r'.+@.+\..+',parsedEmail):
api({'msg':'invalid email'})
def save_bid(tbid):
    """Persist a bid. The destination is /dev/null, i.e. bids are deliberately
    discarded; only the write API surface is kept.

    Fix: use a with-statement so the file handle is closed even if write()
    raises (the old open/write/close leaked the handle on error).
    """
    with open("/dev/null", "w") as save_file:
        save_file.write(tbid)
def list_items():
    """Respond with every document in the 'pawnshop' index keyed by _id,
    or with {'msg': 'error'} when the index is empty."""
    query = {"query": {"match_all": {}}}
    res = es_client.search(index="pawnshop", body=query)
    hits = res['hits']['hits']
    if len(hits) > 0:
        listing = {
            hit['_id']: {
                'seller': hit['_source']['seller'],
                'item': hit['_source']['item'],
                "picture": hit['_source']['picture'],
            }
            for hit in hits
        }
        api({"list": listing})
    # Only reached when there were no hits: api() above exits the script.
    api({"msg": "error"})
def lookupSeller(emailAddr):
    """Return '<emailAddr> found' if any document in the 'pawnshop' index
    lists this seller, otherwise 'not found'."""
    # NOTE(review): emailAddr is concatenated straight into a query_string
    # query -- special Lucene characters in the input alter the query
    # (query injection). Confirm callers always run verify_email() first,
    # and consider escaping or a term-level query instead.
    body = {
        'query': {
            'query_string': {
                'query': 'id:>0 AND seller:"'+emailAddr+'"',
                "default_field":"seller"
            }
        }
    }
    res = es_client.search(index="pawnshop", body=body)
    if(len(res['hits']['hits'])>0):
        return emailAddr+' found'
    return 'not found'
| 25.019231 | 117 | 0.6103 |
406e0d25757cf95f5bc4eb64641827d4870c2485
| 330 |
py
|
Python
|
Chapter9_AdvancedDL/Chapter9_3_ModelImprovement/bostonLinReg.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | 11 |
2020-10-12T14:06:31.000Z
|
2022-02-22T09:16:32.000Z
|
Chapter9_AdvancedDL/Chapter9_3_ModelImprovement/bostonLinReg.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter9_AdvancedDL/Chapter9_3_ModelImprovement/bostonLinReg.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | 8 |
2020-10-29T07:53:49.000Z
|
2022-03-17T11:01:20.000Z
|
from sklearn.linear_model import LinearRegression
from tf_utils.bostonData import BOSTON
if __name__ == "__main__":
    # tf_utils.BOSTON wraps the Boston housing data with a train/test split.
    boston = BOSTON()
    x_train, y_train = boston.get_train_set()
    x_test, y_test = boston.get_test_set()
    # Fit an ordinary least-squares baseline and report R^2 on the test set.
    regr = LinearRegression()
    regr.fit(x_train, y_train)
    print(regr.score(x_test, y_test))
| 23.571429 | 49 | 0.724242 |
29351fd1c6181f52ca793f43846030420bd0964f
| 838 |
py
|
Python
|
exercises/fr/test_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/fr/test_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/fr/test_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    """Validate the learner's solution: a manually created PERSON entity span.

    Checks the submitted source (``__solution__``) and the resulting ``doc``
    and ``span`` globals, then prints an encouragement via ``__msg__``.
    """
    # Bug fix: the original condition was
    #     "import Doc, Span" or "import Span, Doc" in __solution__
    # which is always truthy because the first operand is a non-empty string
    # literal; the membership test must be applied to both alternatives.
    assert (
        "import Doc, Span" in __solution__ or "import Span, Doc" in __solution__
    ), "As-tu correctement importé Doc et Span ?"
    assert doc.text == "Elle aime David Bowie", "As-tu correctement créé le Doc ?"
    assert span.text == "David Bowie", "As-tu correctement créé le span ?"
    assert span.label_ == "PERSON", "As-tu ajouté le label PERSON au span?"
    assert "doc.ents =" in __solution__, "As-tu réécrit doc.ents ?"
    assert len(doc.ents) == 1, "As-tu ajouté le span à doc.ents ?"
    assert (
        list(doc.ents)[0].text == "David Bowie"
    ), "As-tu ajouté le span à doc.ents ?"
    __msg__.good(
        "Parfait ! Savoir créer manuellement des objets de spaCy et modifier "
        "les entités sera utile plus tard quand tu créeras tes propres "
        "pipelines d'extraction d'informations."
    )
| 46.555556 | 82 | 0.640811 |
2976a5a2e0b2c451f863a1b16267138cce012fa5
| 1,200 |
py
|
Python
|
0-notes/job-search/Leetcode/~02-RemoveKDigits.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Leetcode/~02-RemoveKDigits.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Leetcode/~02-RemoveKDigits.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# Given a non-negative integer num represented as a string, remove k digits from the number
# so that the new number is the smallest possible.
# NOTE: The length of num is less than 10002 and will be ≥ k.
# NOTE: The given num does not contain any leading zero.
'''
Example 1:
Input: num = "1432219", k = 3 | Output: "1219"
Explanation: Remove the three digits 4, 3, and 2
to form the new number 1219 which is the smallest.
'''
'''
Example 2:
Input: num = "10200", k = 1 | Output: "200"
Explanation: Remove the leading 1 and the number is 200.
Note that the output must not contain leading zeroes.
'''
'''
Example 3:
Input: num = "10", k = 2 | Output: "0"
Explanation: Remove all the digits from the number and it is left with nothing which is 0.
'''
# time complexity: O(n^2) (2 nested loops)
# space complexity: O(1)
def removeKdigits(num, k):
    """Remove k digits from the numeric string `num` to form the smallest value.

    Greedy monotonic-stack approach: a digit is dropped whenever a smaller
    digit can take an earlier position.  Leading zeros are stripped from the
    result; an emptied number becomes "0".
    """
    length = len(num)
    if length == k:
        return "0"
    stack = []
    for digit in num:
        # Pop any larger digit that precedes the current one while removals
        # remain: placing a smaller digit earlier always yields a smaller number.
        while k > 0 and stack and stack[-1] > digit:
            stack.pop()
            k -= 1
        stack.append(digit)
    # Digits are now non-decreasing, so any leftover removals trim the tail.
    if k:
        stack = stack[:-k]
    return "".join(stack).lstrip('0') or "0"
| 25.531915 | 92 | 0.598333 |
9ed4004596f156c049e789a78b6d553b3ed10e12
| 1,077 |
py
|
Python
|
TreeModelLib/TreeModel.py
|
jvollhueter/pyMANGA-1
|
414204a394d44405225b4b8224b19464c1006f1d
|
[
"MIT"
] | null | null | null |
TreeModelLib/TreeModel.py
|
jvollhueter/pyMANGA-1
|
414204a394d44405225b4b8224b19464c1006f1d
|
[
"MIT"
] | null | null | null |
TreeModelLib/TreeModel.py
|
jvollhueter/pyMANGA-1
|
414204a394d44405225b4b8224b19464c1006f1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: 2018-Today
@author: [email protected]
"""
class TreeModel:
    """Facade that forwards tree-model calls to the configured concept.

    Each public method delegates to ``self.concept``; if the concept does
    not implement the required hook, a descriptive AttributeError is raised
    instead.
    """

    def prepareNextTimeStep(self, t_ini, t_end):
        # Delegate time-step preparation to the underlying concept.
        try:
            self.concept.prepareNextTimeStep(t_ini, t_end)
        except AttributeError:
            self.raiseAttributeError("prepareNextTimeStep")

    def addTree(self, x, y, geometry, parameter):
        # Delegate tree registration to the underlying concept.
        try:
            self.concept.addTree(x, y, geometry, parameter)
        except AttributeError:
            self.raiseAttributeError("addTree")

    def progressTree(self, tree, aboveground_resources, belowground_resources):
        # Delegate per-tree growth progression to the underlying concept.
        try:
            self.concept.progressTree(
                tree, aboveground_resources, belowground_resources)
        except AttributeError:
            self.raiseAttributeError("progressTree")

    def raiseAttributeError(self, string):
        # getConceptType() is expected to be supplied by the subclass.
        message = ("Function '" + string + "' is " + "required for "
                   + self.getConceptType() + " but not implemented!")
        raise AttributeError(message)
| 32.636364 | 79 | 0.600743 |
81fc2e38199db7eabafb311039595b8e7ff7a4cc
| 91 |
py
|
Python
|
python/pathlib/origin/rename.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pathlib/origin/rename.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pathlib/origin/rename.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from pathlib import Path

# Create an empty 'names.txt' in the working directory, then rename it to
# 'mynames.txt' (Path.rename wraps os.rename).
source = Path('names.txt')
source.touch()
source.rename('mynames.txt')
| 15.166667 | 26 | 0.725275 |
6f0aa528b4b8a3288c90ecba6ebb50d2787533dc
| 261 |
py
|
Python
|
app/views/models/sessions.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
app/views/models/sessions.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
app/views/models/sessions.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
from flask_restplus import Model, fields

# Request schema for session creation (login).  Both fields are mandatory;
# 'username' maps to the 'username' attribute of the payload object.
session_json = Model('Session Input', {
    'username': fields.String(required=True, description='The user name', attribute='username'),
    'password': fields.String(required=True, description='The user password'),
})
| 37.285714 | 96 | 0.739464 |
48c94a7d57c69b47e22d4866ea75111eeb636145
| 3,621 |
py
|
Python
|
synthetic/scripts/gen_data_2.py
|
zhewang/rdm
|
b2c00c5db6ee4861c1b7214e3a63b630be30bb31
|
[
"Apache-2.0"
] | null | null | null |
synthetic/scripts/gen_data_2.py
|
zhewang/rdm
|
b2c00c5db6ee4861c1b7214e3a63b630be30bb31
|
[
"Apache-2.0"
] | null | null | null |
synthetic/scripts/gen_data_2.py
|
zhewang/rdm
|
b2c00c5db6ee4861c1b7214e3a63b630be30bb31
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import io
import struct
import json
import random
import numpy as np
import itertools
import math
import sys
def GetGaussInt(a, b):
    """Draw an integer from a Gaussian centred on the midpoint of [a, b].

    The first draw uses sigma=(b-a)/4; rejected draws are retried with the
    wider sigma=(b-a)/2 until the truncated value falls inside [a, b].
    """
    mid = (a + b) / 2
    draw = random.gauss(mid, (b - a) / 4)
    while not (a <= int(draw) <= b):
        draw = random.gauss(mid, (b - a) / 2)
    return int(draw)
def header():
    """Write the nanocube .dmp header (schema + field declarations) to stdout.

    Uses the module-level LEVEL (quadtree depth) and count (number of float
    variable columns) set in the __main__ block.
    """
    parts = [
        "name: test_file\n",
        "encoding: binary\n",
        "field: location nc_dim_quadtree_{0}\n".format(LEVEL),
        "field: test_category nc_dim_cat_1\n",
        "valname: test_category 0 CATEGORY_A\n",
        "valname: test_category 1 CATEGORY_B\n",
        "valname: test_category 2 CATEGORY_C\n",
        "valname: test_category 3 CATEGORY_D\n",
        "valname: test_category 4 CATEGORY_E\n",
        "metadata: tbin 2016-01-01_00:00:00_3600s\n",
        "field: time nc_dim_time_2\n",
    ]
    # One float64 variable column per schema entry (count includes the count
    # dimension but not the time dimension).
    for i in range(count):
        parts.append('field: dim' + str(i) + ' nc_var_float_8' + '\n')
    sys.stdout.write("".join(parts) + '\n')
def getXY():
    """Sample an (x, y) point inside (0, 10) x (0, 10) from a 3-mode mixture.

    Picks one of three cluster centres uniformly, then rejection-samples a
    2-D Gaussian (diagonal covariance 3) until the point is inside the box.
    """
    centres = {1: [7, 2], 2: [2, 7], 3: [2, 2]}
    centre = centres[random.randint(1, 3)]
    cov = np.diag([3, 3])
    point = [-1, -1]
    while not (0 < point[0] < 10 and 0 < point[1] < 10):
        point = np.random.multivariate_normal(centre, cov).tolist()
    return point
def body():
    """Sample ROWS records and stream them as packed binary rows to stdout.

    Each record is a 2-D key (fixed grid position with -f, otherwise a draw
    from the Gaussian mixture in getXY) plus `count` float variables: a
    constant 1, DIMS Gaussian features, and all pairwise (with replacement)
    products of those features.  On the first row the variable schema is
    printed, and the process exits early when -s was given.

    NOTE(review): writing raw struct bytes with sys.stdout.write and the
    string repetition 'd'*count both assume Python 2 (int division, byte
    stdout); under Python 3 this needs sys.stdout.buffer and an int count.
    """
    global flag
    data_schema = []
    # Sample keys
    for n in range(ROWS):
        # Variances for this record: two coordinates, a constant 1, and
        # (appended below) a term that grows as x and y get closer.
        key = [n%4+0.5, n%4+0.5, 1]
        if args.f is False:
            key = getXY()
        key.append(10-abs(key[0]-key[1]))
        # NOTE(review): with -f the key has 4 entries while `mean` has DIMS
        # (default 3) entries — np.random.multivariate_normal would then
        # fail on the shape mismatch; confirm -f is only used with DIMS=3
        # via getXY disabled, or matching dimensions.
        mean = [0 for i in range(DIMS)]
        cov = np.diag(key)
        v = np.random.multivariate_normal(mean, cov).tolist()
        # Calculate all comination for variables
        var = [1]+v
        if flag is True:
            data_schema.append('count')
            for i in range(len(v)):
                data_schema.append(str(i))
        comb = itertools.combinations_with_replacement(range(DIMS), 2)
        for c in comb:
            var.append(v[int(c[0])]*v[int(c[1])])
            if flag is True:
                data_schema.append(str(c[0])+'*'+str(c[1]))
        var = [round(i, 6) for i in var]
        # Print the schema once (first iteration only).
        if flag is True:
            print("NanoCube variable dimensions: "+str(count))
            print('Variable Schema: {}'.format(data_schema))
            flag = False
        if args.s is True:
            sys.exit(0)
        # Dump: little-endian record of two int32 tile coordinates, a uint8
        # category, a uint16 time bin, then `count` float64 variables.
        pack_str = '<iiBH' + 'd'*count
        resolution = 2**LEVEL
        xRange = xMax-xMin
        yRange = yMax-yMin
        xTile = int(resolution*((key[0]*1.0-xMin)/xRange))
        yTile = int(resolution*((key[1]*1.0-yMin)/yRange))
        binStr = struct.pack(pack_str,xTile,yTile,GetGaussInt(0,4),0,*var)
        sys.stdout.write(binStr)
if __name__ == '__main__':
    # Command-line driver: parse options, publish them as module globals
    # used by header() and body(), then emit the dump.
    parser = argparse.ArgumentParser(description='Generate testing data')
    parser.add_argument('-f', action='store_true', default=False, help='fixed position')
    parser.add_argument('-s', action='store_true', default=False, help='only show schema of dmp file')
    parser.add_argument('-d', type=int, default=3, help='dimension of features')
    parser.add_argument('-r', type=int, default=1, help='number of rows')
    parser.add_argument('-l', type=int, default=15, help='quadtree level')
    args = parser.parse_args()

    DIMS = args.d
    ROWS = args.r
    LEVEL = args.l
    # Bounding box mapped onto the quadtree tiles.
    xMin = 0
    xMax = 10
    yMin = 0
    yMax = 10
    flag = args.s
    # Number of variable columns: the count dimension, the DIMS raw
    # features, and all pairwise (with replacement) feature products.
    # Bug fix: use floor division so `count` is an int under Python 3 as
    # well — range(count) and 'd'*count in body() require an integer, and
    # true division would yield a float here.  Value is unchanged on py2.
    count = DIMS*(DIMS+1)//2 + DIMS + 1

    if flag is False:
        header()
    body()
| 29.680328 | 102 | 0.570284 |
82d90aedce759a77fd769f240bf4e76ce74a0b58
| 304 |
py
|
Python
|
python/DeepLearning/test.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 1 |
2019-05-22T07:12:34.000Z
|
2019-05-22T07:12:34.000Z
|
python/DeepLearning/test.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 3 |
2021-12-10T01:13:54.000Z
|
2021-12-14T21:18:42.000Z
|
python/DeepLearning/test.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
# Activation function: logistic sigmoid, 1 / (1 + e^(-x)).
def sigmoid(x):
    """Return the element-wise logistic sigmoid of x (scalar or ndarray)."""
    return 1 / (1 + np.exp(-x))
if __name__ == '__main__':
    # Shape sanity check: squeezing a (1, 3) row vector yields shape (3,).
    vec = np.array([[1, 2, 3]])
    assert vec.shape == (1, 3)
    vec = np.squeeze(vec)
    print(vec.shape)

    # Sigmoid saturates towards 0 for very negative inputs and towards 1
    # for positive ones.
    out = sigmoid(np.array([-10000000, -4, -3, -2, -1, 0, 1, 2, 3, 4]))
    print(out)
| 16 | 71 | 0.503289 |
7dd22c8bf8f13865cfa0cc4f392b4be6d1354930
| 1,673 |
py
|
Python
|
classification/predict_classifier_1.py
|
gitskim/DnntalPrivate
|
20f84782e9ab20c68b43f32efb16cd22262b8ceb
|
[
"MIT"
] | null | null | null |
classification/predict_classifier_1.py
|
gitskim/DnntalPrivate
|
20f84782e9ab20c68b43f32efb16cd22262b8ceb
|
[
"MIT"
] | null | null | null |
classification/predict_classifier_1.py
|
gitskim/DnntalPrivate
|
20f84782e9ab20c68b43f32efb16cd22262b8ceb
|
[
"MIT"
] | 2 |
2019-05-16T05:48:26.000Z
|
2021-01-27T01:26:22.000Z
|
from keras.models import load_model
import h5py
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from PIL import Image
from skimage.io import imread
from keras.models import Model
import cv2
from keras import backend as K
# Path to the pre-trained Keras classifier; the model is loaded once at
# import time and shared by predict/result_predict below.
model_path_classifier = '../Flaskapp/models/model_classifier.h5'
model = load_model(model_path_classifier)
def predict(img):
    """Classify the image stored at path `img`.

    The file is loaded with OpenCV (BGR); resizing to 224x224 and pixel
    normalisation happen inside result_predict.
    """
    loaded = cv2.imread(str(img))
    return result_predict(loaded)
def result_predict(img):
    """Normalise one BGR image array and return the model's prediction batch."""
    resized = cv2.resize(img, (224, 224))
    # NOTE(review): this tests for a single-channel third axis; a genuine
    # greyscale array from cv2 would be 2-D — confirm upstream guarantees a
    # 3-D input before relying on this branch.
    if resized.shape[2] == 1:
        resized = np.dstack([resized, resized, resized])
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    scaled = rgb.astype(np.float32) / 255.
    # Batch of one sample for model.predict.
    batch = np.array([scaled])
    return model.predict(x=batch)
# Slide a 224x224 window over the image and blank out the windows that the
# classifier marks as positive.
def post_processing(img_path):
    """Return a PIL RGB image where every positive 224x224 tile is blacked out."""
    original = cv2.imread(img_path)
    canvas = original.copy()
    n_rows, n_cols, _ = original.shape
    tile = 224
    # Clip to whole tiles so every window is exactly tile x tile.
    n_rows = int(n_rows / tile) * tile
    n_cols = int(n_cols / tile) * tile
    for x in range(0, n_cols, tile):
        for y in range(0, n_rows, tile):
            window = canvas[y:y + tile, x:x + tile]
            # [0][1] is the positive-class score of the batch's single sample.
            if result_predict(window)[0][1] > 0.5:
                canvas[y:y + tile, x:x + tile] = 0
    return Image.fromarray(canvas, 'RGB')
| 26.983871 | 91 | 0.662881 |
8187f32f636a41836e6bcddf20b893841022962a
| 92 |
py
|
Python
|
2015/misc/test-table-1/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2015/misc/test-table-1/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2015/misc/test-table-1/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1yUpR-uFHjHoZvh9fOAmiF_-3KXZw01F1Ysi2qUhe0Ns'
| 23 | 68 | 0.826087 |
c4c7058ec6a032513d2812ed70f1b0ce7f05eebc
| 8,449 |
py
|
Python
|
Micropython_ESP8266/fontlib_rot.py
|
arkahu/Stuff
|
6b5955444a4c538ce194764c28389e7043ab5cdd
|
[
"MIT"
] | null | null | null |
Micropython_ESP8266/fontlib_rot.py
|
arkahu/Stuff
|
6b5955444a4c538ce194764c28389e7043ab5cdd
|
[
"MIT"
] | null | null | null |
Micropython_ESP8266/fontlib_rot.py
|
arkahu/Stuff
|
6b5955444a4c538ce194764c28389e7043ab5cdd
|
[
"MIT"
] | null | null | null |
#Shortened from Standard_rotated to fit RAM
# Font: Standard.pf
fonts = (
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 000 (.)
0x7E, 0x81, 0x95, 0xB1, 0xB1, 0x95, 0x81, 0x7E, # Char 001 (.)
0x7E, 0xFF, 0xEB, 0xCF, 0xCF, 0xEB, 0xFF, 0x7E, # Char 002 (.)
0x0E, 0x1F, 0x3F, 0x7E, 0x3F, 0x1F, 0x0E, 0x00, # Char 003 (.)
0x08, 0x1C, 0x3E, 0x7F, 0x3E, 0x1C, 0x08, 0x00, # Char 004 (.)
0x18, 0x18, 0x5B, 0x6F, 0x6F, 0x5B, 0x18, 0x18, # Char 005 (.)
0x18, 0x1C, 0x5E, 0x6F, 0x5E, 0x1C, 0x18, 0x00, # Char 006 (.)
0x00, 0x00, 0x18, 0x3C, 0x3C, 0x18, 0x00, 0x00, # Char 007 (.)
0xFF, 0xFF, 0xE7, 0xC3, 0xC3, 0xE7, 0xFF, 0xFF, # Char 008 (.)
0x00, 0x3C, 0x66, 0x42, 0x42, 0x66, 0x3C, 0x00, # Char 009 (.)
0xFF, 0xC3, 0x99, 0xBD, 0xBD, 0x99, 0xC3, 0xFF, # Char 010 (.)
0x70, 0xF8, 0x88, 0x88, 0xFD, 0x7F, 0x07, 0x0F, # Char 011 (.)
0x00, 0x4E, 0x5F, 0xF1, 0xF1, 0x5F, 0x4E, 0x00, # Char 012 (.)
0x40, 0x60, 0x60, 0x60, 0x3F, 0x02, 0x0C, 0x00, # Char 013 (.)
0x20, 0x30, 0x30, 0x9F, 0xC5, 0xCA, 0x7C, 0x00, # Char 014 (.)
0x08, 0x2A, 0x1C, 0x77, 0x1C, 0x2A, 0x08, 0x00, # Char 015 (.)
0x7F, 0x3E, 0x3E, 0x1C, 0x1C, 0x08, 0x08, 0x00, # Char 016 (.)
0x08, 0x08, 0x1C, 0x1C, 0x3E, 0x3E, 0x7F, 0x00, # Char 017 (.)
0x00, 0x14, 0x22, 0x7F, 0x7F, 0x22, 0x14, 0x00, # Char 018 (.)
0x00, 0x5F, 0x5F, 0x00, 0x00, 0x5F, 0x5F, 0x00, # Char 019 (.)
0x0E, 0x1F, 0x11, 0x7F, 0x7F, 0x01, 0x7F, 0x7F, # Char 020 (.)
0x40, 0x98, 0xA6, 0xA5, 0x65, 0x19, 0x02, 0x00, # Char 021 (.)
0x00, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x00, # Char 022 (.)
0x00, 0x94, 0xA2, 0xFF, 0xFF, 0xA2, 0x94, 0x00, # Char 023 (.)
0x00, 0x04, 0x02, 0x7F, 0x7F, 0x02, 0x04, 0x00, # Char 024 (.)
0x00, 0x10, 0x20, 0x7F, 0x7F, 0x20, 0x10, 0x00, # Char 025 (.)
0x08, 0x08, 0x08, 0x2A, 0x3E, 0x1C, 0x08, 0x00, # Char 026 (.)
0x08, 0x1C, 0x3E, 0x2A, 0x08, 0x08, 0x08, 0x00, # Char 027 (.)
0x3C, 0x3C, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, # Char 028 (.)
0x08, 0x1C, 0x2A, 0x08, 0x08, 0x2A, 0x1C, 0x08, # Char 029 (.)
0x30, 0x38, 0x3C, 0x3E, 0x3C, 0x38, 0x30, 0x00, # Char 030 (.)
0x06, 0x0E, 0x1E, 0x3E, 0x1E, 0x0E, 0x06, 0x00, # Char 031 (.)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, # Char 032 ( )
0x00, 0x00, 0x06, 0x5F, 0x5F, 0x06, 0x00, 0x00, # Char 033 (!)
0x00, 0x01, 0x07, 0x00, 0x01, 0x07, 0x00, 0x00, # Char 034 (")
0x14, 0x7F, 0x7F, 0x14, 0x7F, 0x7F, 0x14, 0x00, # Char 035 (#)
0x24, 0x2E, 0x2A, 0x7F, 0x2A, 0x3A, 0x10, 0x00, # Char 036 ($)
0x4C, 0x6A, 0x36, 0x18, 0x6C, 0x56, 0x32, 0x00, # Char 037 (%)
0x30, 0x7A, 0x4D, 0x4D, 0x7F, 0x32, 0x50, 0x00, # Char 038 (&)
0x00, 0x00, 0x05, 0x03, 0x00, 0x00, 0x00, 0x00, # Char 039 (')
0x00, 0x1C, 0x3E, 0x63, 0x41, 0x00, 0x00, 0x00, # Char 040 (()
0x00, 0x41, 0x63, 0x3E, 0x1C, 0x00, 0x00, 0x00, # Char 041 ())
0x08, 0x2A, 0x3E, 0x1C, 0x1C, 0x3E, 0x2A, 0x08, # Char 042 (*)
0x08, 0x08, 0x3E, 0x3E, 0x08, 0x08, 0x00, 0x00, # Char 043 (+)
0x00, 0x00, 0xA0, 0x60, 0x00, 0x00, 0x00, 0x00, # Char 044 (,)
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, # Char 045 (-)
0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00, 0x00, # Char 046 (.)
0x40, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x00, # Char 047 (/)
0x3E, 0x7F, 0x79, 0x4D, 0x47, 0x7F, 0x3E, 0x00, # Char 048 (0)
0x00, 0x44, 0x46, 0x7F, 0x7F, 0x40, 0x40, 0x00, # Char 049 (1)
0x62, 0x73, 0x51, 0x59, 0x49, 0x6F, 0x66, 0x00, # Char 050 (2)
0x22, 0x63, 0x49, 0x49, 0x49, 0x7F, 0x36, 0x00, # Char 051 (3)
0x18, 0x1C, 0x16, 0x53, 0x7F, 0x7F, 0x50, 0x00, # Char 052 (4)
0x27, 0x67, 0x45, 0x45, 0x45, 0x7D, 0x39, 0x00, # Char 053 (5)
0x3E, 0x7F, 0x49, 0x49, 0x49, 0x7B, 0x32, 0x00, # Char 054 (6)
0x03, 0x03, 0x71, 0x79, 0x0D, 0x07, 0x03, 0x00, # Char 055 (7)
0x36, 0x7F, 0x49, 0x49, 0x49, 0x7F, 0x36, 0x00, # Char 056 (8)
0x26, 0x6F, 0x49, 0x49, 0x49, 0x7F, 0x3E, 0x00, # Char 057 (9)
0x00, 0x00, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, # Char 058 (:)
0x00, 0x00, 0xA2, 0x62, 0x00, 0x00, 0x00, 0x00, # Char 059 (;)
0x00, 0x08, 0x1C, 0x36, 0x63, 0x41, 0x00, 0x00, # Char 060 (<)
0x00, 0x24, 0x24, 0x24, 0x24, 0x24, 0x24, 0x00, # Char 061 (=)
0x00, 0x41, 0x63, 0x36, 0x1C, 0x08, 0x00, 0x00, # Char 062 (>)
0x02, 0x03, 0x51, 0x59, 0x0F, 0x06, 0x00, 0x00, # Char 063 (?)
0x3E, 0x41, 0x49, 0x55, 0x55, 0x5D, 0x1E, 0x00, # Char 064 (@)
0x7E, 0x7F, 0x09, 0x09, 0x09, 0x7F, 0x7E, 0x00, # Char 065 (A)
0x41, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00, # Char 066 (B)
0x3E, 0x7F, 0x41, 0x41, 0x41, 0x63, 0x22, 0x00, # Char 067 (C)
0x41, 0x7F, 0x7F, 0x41, 0x41, 0x7F, 0x3E, 0x00, # Char 068 (D)
0x41, 0x7F, 0x7F, 0x49, 0x5D, 0x41, 0x63, 0x00, # Char 069 (E)
0x41, 0x7F, 0x7F, 0x49, 0x1D, 0x01, 0x03, 0x00, # Char 070 (F)
0x3E, 0x7F, 0x41, 0x41, 0x51, 0x77, 0x76, 0x00, # Char 071 (G)
0x7F, 0x7F, 0x08, 0x08, 0x08, 0x7F, 0x7F, 0x00, # Char 072 (H)
0x00, 0x00, 0x41, 0x7F, 0x7F, 0x41, 0x00, 0x00, # Char 073 (I)
0x30, 0x70, 0x40, 0x41, 0x7F, 0x3F, 0x01, 0x00, # Char 074 (J)
0x41, 0x7F, 0x7F, 0x08, 0x1C, 0x77, 0x63, 0x00, # Char 075 (K)
0x41, 0x7F, 0x7F, 0x41, 0x40, 0x60, 0x70, 0x00, # Char 076 (L)
0x7F, 0x7E, 0x0C, 0x18, 0x0C, 0x7E, 0x7F, 0x00, # Char 077 (M)
0x7F, 0x7F, 0x06, 0x0C, 0x18, 0x7F, 0x7F, 0x00, # Char 078 (N)
0x3E, 0x7F, 0x41, 0x41, 0x41, 0x7F, 0x3E, 0x00, # Char 079 (O)
0x41, 0x7F, 0x7F, 0x49, 0x09, 0x0F, 0x06, 0x00, # Char 080 (P)
0x3E, 0x7F, 0x41, 0x71, 0x61, 0xFF, 0xBE, 0x00, # Char 081 (Q)
0x41, 0x7F, 0x7F, 0x09, 0x09, 0x7F, 0x76, 0x00, # Char 082 (R)
0x26, 0x6F, 0x49, 0x49, 0x49, 0x7B, 0x32, 0x00, # Char 083 (S)
0x00, 0x07, 0x41, 0x7F, 0x7F, 0x41, 0x07, 0x00, # Char 084 (T)
0x3F, 0x7F, 0x40, 0x40, 0x40, 0x7F, 0x3F, 0x00, # Char 085 (U)
0x0F, 0x1F, 0x30, 0x60, 0x30, 0x1F, 0x0F, 0x00, # Char 086 (V)
0x7F, 0x3F, 0x18, 0x0C, 0x18, 0x3F, 0x7F, 0x00, # Char 087 (W)
0x41, 0x63, 0x3E, 0x1C, 0x3E, 0x63, 0x41, 0x00, # Char 088 (X)
0x00, 0x07, 0x4F, 0x78, 0x78, 0x4F, 0x07, 0x00, # Char 089 (Y)
0x47, 0x63, 0x71, 0x59, 0x4D, 0x67, 0x73, 0x00, # Char 090 (Z)
0x00, 0x7F, 0x7F, 0x41, 0x41, 0x00, 0x00, 0x00, # Char 091 ([)
0x01, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00, # Char 092 (\)
0x00, 0x41, 0x41, 0x7F, 0x7F, 0x00, 0x00, 0x00, # Char 093 (])
0x08, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x08, 0x00, # Char 094 (^)
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, # Char 095 (_)
0x00, 0x00, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00, # Char 096 (`)
0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00, # Char 097 (a)
0x01, 0x7F, 0x7F, 0x48, 0x48, 0x78, 0x30, 0x00, # Char 098 (b)
0x38, 0x7C, 0x44, 0x44, 0x44, 0x6C, 0x28, 0x00, # Char 099 (c)
0x30, 0x78, 0x48, 0x49, 0x3F, 0x7F, 0x40, 0x00, # Char 100 (d)
0x38, 0x7C, 0x54, 0x54, 0x54, 0x5C, 0x18, 0x00, # Char 101 (e)
0x00, 0x48, 0x7E, 0x7F, 0x49, 0x03, 0x02, 0x00, # Char 102 (f)
0x18, 0xBC, 0xA4, 0xA4, 0xF8, 0x7C, 0x04, 0x00, # Char 103 (g)
0x41, 0x7F, 0x7F, 0x08, 0x04, 0x7C, 0x78, 0x00, # Char 104 (h)
0x00, 0x00, 0x44, 0x7D, 0x7D, 0x40, 0x00, 0x00, # Char 105 (i)
0x40, 0xC0, 0x80, 0x88, 0xFA, 0x7A, 0x00, 0x00, # Char 106 (j)
0x41, 0x7F, 0x7F, 0x10, 0x38, 0x6C, 0x44, 0x00, # Char 107 (k)
0x00, 0x00, 0x41, 0x7F, 0x7F, 0x40, 0x00, 0x00, # Char 108 (l)
0x7C, 0x7C, 0x08, 0x78, 0x0C, 0x7C, 0x78, 0x00, # Char 109 (m)
0x04, 0x7C, 0x78, 0x04, 0x04, 0x7C, 0x78, 0x00, # Char 110 (n)
0x38, 0x7C, 0x44, 0x44, 0x44, 0x7C, 0x38, 0x00, # Char 111 (o)
0x84, 0xFC, 0xF8, 0xA4, 0x24, 0x3C, 0x18, 0x00, # Char 112 (p)
0x18, 0x3C, 0x24, 0xA4, 0xFC, 0xFC, 0x80, 0x00, # Char 113 (q)
0x44, 0x7C, 0x78, 0x4C, 0x04, 0x0C, 0x0C, 0x00, # Char 114 (r)
0x08, 0x5C, 0x54, 0x54, 0x54, 0x74, 0x20, 0x00, # Char 115 (s)
0x04, 0x04, 0x3E, 0x7F, 0x44, 0x24, 0x00, 0x00, # Char 116 (t)
0x3C, 0x7C, 0x40, 0x40, 0x3C, 0x7C, 0x40, 0x00, # Char 117 (u)
0x0C, 0x1C, 0x30, 0x60, 0x30, 0x1C, 0x0C, 0x00, # Char 118 (v)
0x3C, 0x7C, 0x60, 0x38, 0x60, 0x7C, 0x3C, 0x00, # Char 119 (w)
0x44, 0x6C, 0x38, 0x10, 0x38, 0x6C, 0x44, 0x00, # Char 120 (x)
0x9C, 0xBC, 0xA0, 0xA0, 0xFC, 0x7C, 0x00, 0x00, # Char 121 (y)
0x4C, 0x64, 0x74, 0x5C, 0x4C, 0x64, 0x00, 0x00, # Char 122 (z)
0x00, 0x00, 0x08, 0x3E, 0x77, 0x41, 0x41, 0x00, # Char 123 ({)
0x00, 0x00, 0x00, 0x77, 0x77, 0x00, 0x00, 0x00, # Char 124 (|)
0x41, 0x41, 0x77, 0x3E, 0x08, 0x00, 0x00, 0x00, # Char 125 (})
0x02, 0x03, 0x01, 0x03, 0x02, 0x03, 0x01, 0x00, # Char 126 (~)
0x70, 0x78, 0x4C, 0x46, 0x4C, 0x78, 0x70, 0x00, # Char 127 (.)
)
def givefont(f, raw=False):
    """Return the 8-byte bitmap row tuple for one glyph of the 8x8 font table.

    f   -- a single character (raw=False) or an already-numeric glyph index
           (raw=True).
    raw -- when True, f is used directly as the index instead of ord(f).
    """
    # Bug fix: the original only assigned `index` when raw was False, so any
    # call with raw=True raised NameError.  With raw=True the caller passes
    # the index itself.
    index = f if raw else ord(f)
    return fonts[8*index : 8*index + 8]
| 58.268966 | 63 | 0.617351 |
1efe6490a2180bf4fa387f2890107175b9e586b0
| 1,351 |
py
|
Python
|
helper/res_ex.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
helper/res_ex.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
helper/res_ex.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
"""
Pyhton script for resizing existing images
@author: wimmer simon-justus
@param dir: 1 directory that contains the raw data
@param w: width of file
@param h: height of file
"""
import sys
import os
import cv2
import numpy as np
from time import sleep
# Resize every image found in `dir` to w x h and store the results as
# greyscale .bmp files in a 'results' subdirectory.
def resize_images(dir, w, h):
    counter = 0
    path = str(dir)+"/" # / on linux
    path_results = path+"results"
    if not os.path.exists(path_results):
        os.makedirs(path_results)
    for name in os.listdir(dir):
        try:
            counter += 1
            print("resizing: "+path+str(name))
            image = cv2.imread(path+str(name), cv2.IMREAD_GRAYSCALE)
            resized = cv2.resize(image, (w, h))
            # Rotation step intentionally omitted; re-add a
            # cv2.getRotationMatrix2D / cv2.warpAffine pass here if needed.
            cv2.imwrite(path_results+"/"+"5050_"+str(counter)+".bmp", resized) # / on linux
        except Exception as e:
            # Best-effort batch job: report and skip unreadable images.
            print("Fehler, Image wird uebersprungen:\n"+str(e))
# CLI entry point: expects <dir> <width> <height> on the command line and
# reports (in German) when arguments are missing or invalid.
try:
    directory = sys.argv[1]
    width = sys.argv[2]
    height = sys.argv[3]
    resize_images(directory, int(width), int(height))
except Exception as i:
    print("Fehler beim ausfeuren der Scripts. Bitte dir, w und h angeben:\n"+str(i))
484bc701fe477e213e50b42304b49b83e8f22d6c
| 17,198 |
py
|
Python
|
scripts/xgboost_classifier.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 8 |
2018-03-09T16:44:38.000Z
|
2021-04-07T11:33:30.000Z
|
scripts/xgboost_classifier.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 4 |
2020-03-24T15:34:54.000Z
|
2021-06-01T21:54:33.000Z
|
scripts/xgboost_classifier.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 1 |
2020-05-13T14:30:55.000Z
|
2020-05-13T14:30:55.000Z
|
"""
Script to create word2vec models, given a set of mapped POIs.
"""
# Authors: Gianni Barlacchi <[email protected]>
# Michele Ferretti <[email protected]>
import argparse
import os
import math
import errno
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame
from shapely.geometry import Point
import sys
sys.path.append("../GeoL")
import numpy as np
import seaborn as sns
sns.set_style("ticks")
sns.set_context("paper")
import sklearn
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV # Perforing grid search
from sklearn.metrics import confusion_matrix
import sklearn.metrics as metrics
import joblib
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 4
def printEvalutationMetrics(df_y_test, y_pred):
    """Print a classification report plus accuracy and macro F1.

    df_y_test is a pandas Series of true labels; y_pred a sequence of
    predictions.
    """
    y_true = df_y_test.values
    print(metrics.classification_report(y_true, y_pred))
    print("ACCURACY: {}".format(metrics.accuracy_score(y_true, y_pred)))
    print("F1 SCORE: {}".format(metrics.f1_score(
        y_true, y_pred, average='macro')))
def runExperiment(df_train, df_test, CITY_NAME, SIZE, BASE_DIR_CITY, SIZE1, METRIC, S, WS, C):
    """Run a majority-class baseline and an XGBoost classifier on one city.

    Both models are trained on the feature columns (prefix 'f_') of
    df_train and evaluated on df_test against the 't_predominant' label.
    Metrics are printed and one row per model is appended to
    metrics_s<S>_ws<WS>_c<C>.txt under BASE_DIR_CITY/train.

    SIZE1 is accepted for interface compatibility but is unused here.
    """
    OUTPUT_PATH = os.path.join(BASE_DIR_CITY, "train")
    OUTPUT_FILENAME = os.path.join(
        OUTPUT_PATH, "metrics_s" + str(S) + "_ws" + str(WS) + "_c"+str(C)+".txt")
    dfs = []
    df = {}
    df_y_train = df_train['t_predominant']
    df_y_test = df_test['t_predominant']
    # Baseline: always predict the most frequent training class.
    y_pred = [df_train['t_predominant'].value_counts().idxmax()] * \
        len(df_y_test)
    print("*****************************" + CITY_NAME +
          " "+str(SIZE)+"*********************************")
    print("****** BASELINE ******")
    printEvalutationMetrics(df_y_test, y_pred)
    df['model'] = "baseline_"+METRIC + "_s" + \
        str(S) + "_ws" + str(WS) + "_c"+str(C)
    df['accuracy'] = metrics.accuracy_score(df_y_test.values, y_pred)
    df['f1-score'] = metrics.f1_score(df_y_test.values,
                                      y_pred, average='macro')
    df['precision'] = metrics.precision_score(
        df_y_test.values, y_pred, average='macro')
    df['recall'] = metrics.recall_score(
        df_y_test.values, y_pred, average='macro')
    dfs.append(df)
    print("**********************")
    # Gradient-boosted trees on the embedding features.
    df = {}
    print("****** XGBOOST ******")
    df_X_train = df_train[[c for c in df_train.columns if c.startswith('f_')]]
    df_X_test = df_test[[c for c in df_test.columns if c.startswith('f_')]]
    # Bug fix: the original called xgboost.XGBClassifier(), but the imports
    # only bind `xgb` and `XGBClassifier`, so `xgboost` was an unbound name
    # (NameError).  Use the imported class directly.
    clf = XGBClassifier()
    # .values replaces DataFrame.as_matrix(), which was deprecated and
    # removed in pandas 1.0; it returns the same ndarray.
    clf.fit(df_X_train.values, df_y_train.values.ravel())
    y_pred = clf.predict(df_X_test.values)
    printEvalutationMetrics(df_y_test, y_pred)
    df['model'] = 'GBT_' + METRIC + "_s" + \
        str(S) + "_ws" + str(WS) + "_c"+str(C)
    df['accuracy'] = metrics.accuracy_score(df_y_test.values, y_pred)
    df['f1-score'] = metrics.f1_score(df_y_test.values,
                                      y_pred, average='macro')
    df['precision'] = metrics.precision_score(
        df_y_test.values, y_pred, average='macro')
    df['recall'] = metrics.recall_score(
        df_y_test.values, y_pred, average='macro')
    dfs.append(df)
    print(dfs)
    df = pd.DataFrame(dfs)
    print(df.head())
    # Append to the shared metrics file; write the header only when the
    # file is empty (open(..., 'a') creates it if missing).
    with open(OUTPUT_FILENAME, 'a') as f:
        if (os.stat(OUTPUT_FILENAME).st_size > 0):
            df.to_csv(f, header=False, sep='\t')
        else:
            df.to_csv(f, header=True, sep='\t')
    print('********* CONFUSION MATRIX *******************')
    print(confusion_matrix(df_y_test.values, y_pred))
    print("********************************************************************************")
# --------------------------- this functions serve for param estimation ------------------------------------
def modelfit(model, X, y, useTrainCV=True, cv_folds=5, early_stopping_rounds=50, verbose=False):
    """Optionally pick n_estimators via xgb.cv, then fit `model` on (X, y).

    With useTrainCV, cross-validated boosting (multiclass error metric,
    early stopping) determines the effective number of rounds before the
    final fit.  With verbose, the training-set macro F1 is printed.
    """
    if useTrainCV:
        booster_params = model.get_xgb_params()
        dtrain = xgb.DMatrix(X.values, label=y)
        cv_result = xgb.cv(booster_params, dtrain,
                           num_boost_round=model.get_params()['n_estimators'],
                           nfold=cv_folds, metrics='merror',
                           early_stopping_rounds=early_stopping_rounds)
        # One row per surviving boosting round -> best n_estimators.
        model.set_params(n_estimators=cv_result.shape[0])
    model.fit(X, y, eval_metric='merror')
    if verbose:
        score, predictions = evaluate(model, X, y)
        print("Score: %f" % score)
def tune(X, y, param_test, verbose=0, learning_rate=0.1, n_estimators=140, max_depth=5, min_child_weight=1, gamma=0,
         subsample=0.8, colsample_bytree=0.8, scale_pos_weight=1, reg_alpha=0, seed=28, cv=5):
    """Grid-search `param_test` over an XGBClassifier built from the defaults.

    Scored by macro F1 with `cv`-fold cross-validation.  Returns the tuple
    (best_estimator_, grid_scores_, best_params_, best_score_) from the
    fitted GridSearchCV.

    NOTE(review): grid_scores_ and the iid= argument were removed in newer
    scikit-learn releases; the commented return line shows the cv_results_
    replacement — confirm the installed sklearn version.
    """
    gsearch = GridSearchCV(
        estimator=XGBClassifier(max_depth=max_depth,
                                learning_rate=learning_rate,
                                n_estimators=n_estimators,
                                silent=True,
                                objective='multi:softmax',
                                booster='gbtree',
                                n_jobs=1,
                                nthread=1,
                                gamma=gamma,
                                min_child_weight=min_child_weight,
                                max_delta_step=0,
                                subsample=subsample,
                                colsample_bytree=colsample_bytree,
                                colsample_bylevel=1,
                                reg_alpha=reg_alpha,
                                reg_lambda=1,
                                scale_pos_weight=scale_pos_weight,
                                base_score=0.5,
                                random_state=0,
                                seed=seed,
                                missing=None),
        param_grid=param_test,
        scoring='f1_macro',
        n_jobs=2,
        iid=False,
        cv=cv,
        verbose=verbose)
    gsearch.fit(X, y)
    return gsearch.best_estimator_, gsearch.grid_scores_, gsearch.best_params_, gsearch.best_score_
    # return gsearch.best_estimator_, gsearch.cv_results_, gsearch.best_params_, gsearch.best_score_
def evaluate(model, X_test, y_test):
    """Return (macro F1 score, raw predictions) of `model` on the test split."""
    predictions = model.predict(X_test)
    score = sklearn.metrics.f1_score(y_test, predictions, average="macro")
    return score, predictions
def train_test(params, X_train, y_train, X_test, y_test, seed, verbose=False):
    """Build an XGB multiclass model with `params`, fit on train, score on test.

    Returns (fitted model, macro F1 on the test split, predictions).
    """
    n_classes = len(np.unique(y_train))
    model = XGBClassifier(objective='multi:softmax', num_class=n_classes, seed=seed)
    model.set_params(**params)
    # modelfit handles CV-based n_estimators selection and the final fit.
    modelfit(model, X_train, y_train, verbose=verbose)
    score, predictions = evaluate(model, X_test, y_test)
    return model, score, predictions
# --------------------------- END param estimation ------------------------------------
# TUNING AND TESTING
def build_model_and_tune(tuning, params, X_train, y_train, seed, verbose=True):
    """Sequentially grid-tune XGB hyper-parameters, mutating `params` in place.

    Follows the classic step-wise recipe: tree shape (max_depth /
    min_child_weight), gamma, subsampling ratios, L1 regularisation, then
    n_estimators / learning_rate.  Every step records (params, score) pairs
    in `tuning` and keeps the best parameter values found so far.

    Returns the tuned `params` and the `tuning` history.
    """
    # Best score and update of the parameters
    def tune_and_update(param_test, parameters):
        # Run one grid search; adopt best_params only when the score does
        # not decrease (>= keeps ties, allowing later refinement steps).
        best_estimator, grid_scores, best_params, best_score = tune(X_train, y_train, param_test, seed=seed, **parameters)
        if best_score >= tune_and_update.score:
            tune_and_update.score = best_score
            params.update(best_params)
        tuning.append((parameters.copy(), best_score))
        return best_score
    # Function attribute tracks the best score across all tuning steps.
    tune_and_update.score = float('-inf')
    # Build a model with initial parameters
    #alg, f1_score, predictions = test_param(params, X_train, y_train, X_test, y_test, seed, verbose=verbose > 1)
    #if verbose > 0:
    #    print('Primo modello\tTesting rmse = ' + str(f1_score) + '\n')
    #testing.append((params.copy(), f1_score))
    # Tuning of the parameters
    # Step 1: coarse grid over tree depth and leaf weight.
    params['n_estimators'] = 140
    param_test1 = {
        'max_depth': list(range(3, 10, 2)),
        'min_child_weight': list(range(1, 6, 2))
    }
    sc = tune_and_update(param_test1, params)
    if verbose > 0:
        print('Tuning 1\tScore = ' + str(sc))
    # Step 2: refine depth/weight around the step-1 optimum (+/- 1).
    param_test2 = {
        'max_depth': [params['max_depth'] + k for k in [-1, 0, 1] if params['max_depth'] + k > 0],
        'min_child_weight': [params['min_child_weight'] + k for k in [-1, 0, 1] if params['min_child_weight'] + k > 0]
    }
    sc = tune_and_update(param_test2, params)
    if verbose > 0:
        print('Tuning 2\tScore = ' + str(sc))
    # Step 2b: also probe larger min_child_weight values.
    param_test2b = {'min_child_weight': [6, 8, 10, 12]}
    sc = tune_and_update(param_test2b, params)
    if verbose > 0:
        print('Tuning 2b\tScore = ' + str(sc))
    # Step 3: minimum split-loss gamma.
    param_test3 = {'gamma': [i/10.0 for i in range(0, 5)]}
    sc = tune_and_update(param_test3, params)
    if verbose > 0:
        print('Tuning 3\tScore = ' + str(sc))
    # Steps 4-5: row/column subsampling ratios, coarse then fine.
    params['n_estimators'] = 177
    param_test4 = {
        'subsample': [i/10.0 for i in range(6, 10)],
        'colsample_bytree': [i/10.0 for i in range(6, 10)]
    }
    sc = tune_and_update(param_test4, params)
    if verbose > 0:
        print('Tuning 4\tScore = ' + str(sc))
    ss = int(params['subsample']*100)
    csbt = int(params['colsample_bytree']*100)
    param_test5 = {
        'subsample': [i/100.0 for i in range(max(0, ss-10), ss+5, 5)],
        'colsample_bytree': [i/100.0 for i in range(max(0, csbt-10), csbt+5, 5)]
    }
    sc = tune_and_update(param_test5, params)
    if verbose > 0:
        print('Tuning 5\tScore = ' + str(sc))
    # Steps 6-7: L1 regularisation strength, coarse then around the optimum.
    param_test6 = {
        'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100]
    }
    sc = tune_and_update(param_test6, params)
    if verbose > 0:
        print('Tuning 6\tScore = ' + str(sc))
    if 'reg_alpha' in params:
        a = math.log10(params['reg_alpha'])
    else:
        a = 0
    # NOTE(review): `[0] + np.logspace(...)` is elementwise ndarray addition
    # (adds 0 to every entry), not list concatenation — the intended extra
    # 0 candidate is silently dropped; `[0] + list(np.logspace(...))` would
    # concatenate.  Confirm intent before changing.
    param_test7 = {
        'reg_alpha': [0] + np.logspace(a-2, a+1, num=4)  # [0, 0.001, 0.005, 0.01, 0.05]
    }
    sc = tune_and_update(param_test7, params)
    if verbose > 0:
        print('Tuning 7\tScore = ' + str(sc))
    # Steps 8-9: boosting rounds and learning rate, coarse then log-refined.
    param_test8 = {
        'n_estimators': [10, 100, 1000, 3000],
        'learning_rate': [0.005, 0.01, 0.05, 0.1]
    }
    sc = tune_and_update(param_test8, params)
    if verbose > 0:
        print('Tuning 8\tScore = ' + str(sc))
    n = math.log10(params['n_estimators'])
    l = math.log10(params['learning_rate'])
    param_test9 = {
        'n_estimators': [int(x) for x in np.logspace(min(1, n-1), n+1, num=3)],
        'learning_rate': np.logspace(l-1, l+1, num=3)
    }
    sc = tune_and_update(param_test9, params)
    if verbose > 0:
        print('Tuning 9\tScore = ' + str(sc))
    return params, tuning
# NO TUNING JUST TRAIN+TESTING
def build_model(params, X_train, y_train, X_test, y_test, seed, verbose=1):
    """Train on the train split and score on the test split, without tuning.

    :param params: XGBoost parameter dict forwarded to train_test
    :param seed: random seed forwarded to train_test
    :param verbose: inner training output is shown only when verbose > 1
    :return: (fitted model, test-set predictions, evaluation score)
    """
    fitted, eval_score, preds = train_test(
        params, X_train, y_train, X_test, y_test, seed, verbose=verbose > 1
    )
    return fitted, preds, eval_score
# -----------------------------------------------------------------------------------------------------------
def main(argv):
    """CLI entry point: train (and optionally tune) an XGBoost classifier.

    Reads tab-separated train/test files containing a ``target`` column and
    ``f_*`` feature columns, label-encodes the targets, trains/evaluates the
    model, then writes the fitted model (joblib) and the decoded test-set
    predictions to the given output directories.

    :param argv: command-line arguments, e.g. ``sys.argv[1:]``
    """
    parser = argparse.ArgumentParser('Run XGBOOST on Cellvector embeddings')
    parser.add_argument('-itr', '--input-train',
                        help='Input train',
                        action='store',
                        dest='input_train',
                        required=True,
                        type=str)
    parser.add_argument('-ite', '--input_test',
                        help='Input test',
                        action='store',
                        dest='input_test',
                        required=True,
                        type=str)
    parser.add_argument('-dm', '--directory-model',
                        help='Directory to store outputted model',
                        action='store',
                        dest='directory_model',
                        required=True,
                        type=str)
    parser.add_argument('-dp', '--directory-predictions',
                        help='Directory to store outputted predictions',
                        action='store',
                        dest='directory_predictions',
                        required=True,
                        type=str)
    parser.add_argument('-t', '--tuning',
                        help='Enable XGB parameter tuning. Disabled by default',
                        dest='enable_tuning',
                        action='store_true',
                        default=False)
    # Bug fix: parse the argv handed to main() instead of ignoring it
    # (parse_args() with no argument re-reads sys.argv, which made the
    # `argv` parameter dead and main() untestable in isolation).
    args = parser.parse_args(argv)
    model_path = os.path.join(args.directory_model, 'test.model')
    pred_path = os.path.join(args.directory_predictions, 'pred.dat')
    # Load TRAIN and TEST data (tab-separated)
    df_train = pd.read_csv(args.input_train, sep="\t")
    df_test = pd.read_csv(args.input_test, sep="\t")
    # Fit the label encoder on the train targets only, then apply to both splits
    le = preprocessing.LabelEncoder()
    labels = le.fit(df_train["target"].values.ravel())
    df_train["encoded_target"] = labels.transform(df_train["target"].values.ravel())
    df_test["encoded_target"] = labels.transform(df_test["target"].values.ravel())
    # Build X/y from the dataframes: features are the `f_*` columns
    X_train = df_train[[c for c in df_train.columns if c.startswith("f_")]]
    y_train = df_train["encoded_target"].values.ravel()
    X_test = df_test[[c for c in df_test.columns if c.startswith("f_")]]
    y_test = df_test["encoded_target"].values.ravel()
    # Check data sizes and train/test proportions
    print("X_train", len(X_train.values))
    print("y_train", len(y_train))
    print("X_test", len(X_test.values))
    print("y_test", len(y_test))
    print("X_train proportions: ", len(X_train.values) /
          (len(X_train.values)+len(X_test.values)) * 100)
    print("X_test proportions: ", len(X_test.values) /
          (len(X_train.values)+len(X_test.values)) * 100)
    print("y_train proportions: ", len(y_train) /
          (len(y_train)+len(y_test)) * 100)
    print("y_test proportions: ", len(y_test) /
          (len(y_train)+len(y_test)) * 100)
    # Records each tuning step for the final report
    tuning = []
    # Initial XGBoost parameters (starting point before any tuning)
    params = {}
    params['learning_rate'] = 0.1
    params['n_estimators'] = 1000
    params['max_depth'] = 5
    params['min_child_weight'] = 1
    params['gamma'] = 0
    params['subsample'] = 0.8
    params['colsample_bytree'] = 0.8
    params['scale_pos_weight'] = 1
    if args.enable_tuning:
        # Train + tune parameters (updates `params`, appends steps to `tuning`)
        params, tuning = build_model_and_tune(tuning, params, X_train, y_train, 27)
    # Bug fix: the original `else` branch called
    #     build_model(tuning, params, X_train, y_train, 27)
    # which does not match build_model(params, X_train, y_train, X_test,
    # y_test, seed) and mis-unpacked its 3-value return, so it raised a
    # TypeError at runtime. The unconditional train_test() below already
    # performs the plain train+evaluate pass, so no extra call is needed.
    print('\tValutazione modello finale:')
    model, score, predictions = train_test(params, X_train, y_train, X_test, y_test, 27)
    # Persist the fitted model
    joblib.dump(model, model_path)
    # Save predictions decoded back to the original target labels
    pred_series = pd.Series(le.inverse_transform(predictions))
    pred_series.to_csv(pred_path, index=None, header=False)
    print("----TUNING----\n")
    print(tuning)
    # TODO(review): an earlier draft merged features and targets
    # (`data = X.merge(Y, on=keys)`) to report rmse/std and rmse/range per
    # target, and plotted feature importances to a PDF; restore those
    # diagnostics if they are still wanted.
if __name__ == "__main__":
    # Script entry point: forward the CLI arguments (without the program name).
    main(sys.argv[1:])
| 33.721569 | 140 | 0.58815 |
6f87eb77fbc1d283e5287b2e23e237fe220459f7
| 15,879 |
py
|
Python
|
lale/datasets/data_schemas.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 265 |
2019-08-06T14:45:43.000Z
|
2022-03-30T23:57:48.000Z
|
lale/datasets/data_schemas.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 467 |
2019-08-08T02:01:21.000Z
|
2022-03-25T16:12:00.000Z
|
lale/datasets/data_schemas.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | 81 |
2019-08-07T19:59:31.000Z
|
2022-03-31T09:11:58.000Z
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple, Type
import numpy as np
import pandas as pd
import scipy.sparse
import lale.type_checking
from lale.helpers import _is_spark_df
from lale.type_checking import JSON_TYPE
# Optional dependency: torch-tensor schemas are supported only when importable.
try:
    import torch
    torch_installed = True
except ImportError:
    torch_installed = False
# Optional dependency: Spark DataFrame support (py4j is needed to talk to the
# JVM-side query plan in get_table_name) is enabled only when importable.
try:
    import py4j.protocol
    import pyspark.sql
    spark_installed = True
except ImportError:
    spark_installed = False
# See instructions for subclassing numpy ndarray:
# https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
class NDArrayWithSchema(np.ndarray):
    """numpy ndarray subclass that can carry a JSON schema and a table name."""

    def __new__(
        cls,
        shape,
        dtype=float,
        buffer=None,
        offset=0,
        strides=None,
        order=None,
        json_schema=None,
        table_name=None,
    ):
        # Allocate through the base ndarray constructor, then attach metadata.
        result = super(NDArrayWithSchema, cls).__new__(
            cls, shape, dtype, buffer, offset, strides, order  # type: ignore
        )
        result.json_schema = json_schema
        result.table_name = table_name
        return result

    def __array_finalize__(self, obj):
        # Invoked by numpy on views/copies; propagate metadata from the source
        # array when present. obj is None only for explicit construction,
        # which __new__ already handled.
        if obj is None:
            return
        self.json_schema = getattr(obj, "json_schema", None)
        self.table_name = getattr(obj, "table_name", None)
# See instructions for subclassing pandas DataFrame:
# https://pandas.pydata.org/pandas-docs/stable/development/extending.html#extending-subclassing-pandas
class DataFrameWithSchema(pd.DataFrame):
    """pandas DataFrame subclass that can carry a JSON schema and a table name."""

    # Register the metadata attributes so pandas treats them as internal state
    # instead of column accesses.
    _internal_names = pd.DataFrame._internal_names + ["json_schema", "table_name"]
    _internal_names_set = set(_internal_names)

    @property
    def _constructor(self):
        # Make pandas operations return this subclass rather than pd.DataFrame.
        return DataFrameWithSchema
class SeriesWithSchema(pd.Series):
    """pandas Series subclass that can carry a JSON schema and a table name."""

    # Register the metadata attributes so pandas treats them as internal state.
    _internal_names = pd.Series._internal_names + ["json_schema", "table_name"]
    _internal_names_set = set(_internal_names)

    @property
    def _constructor(self):
        # Make pandas operations return this subclass rather than pd.Series.
        return SeriesWithSchema
def add_schema(obj, schema=None, raise_on_failure=False, recalc=False) -> Any:
    """Wrap *obj* in a schema-carrying subclass and attach a JSON schema.

    Arrays, Series, DataFrames and nested-list tensors are converted to their
    ``*WithSchema`` counterparts. The attached schema is *schema* (validated)
    when given, otherwise inferred via ``to_schema``. An already-attached
    schema is kept unless ``recalc`` is true.

    :param obj: the value to wrap; ``None`` passes through
    :param schema: explicit schema to attach, or ``None`` to infer one
    :param raise_on_failure: raise ValueError on unsupported types instead of
        returning *obj* unchanged
    :param recalc: discard any previously attached schema first
    """
    from lale.settings import disable_data_schema_validation

    if disable_data_schema_validation:
        return obj
    if obj is None:
        return None
    # Already-wrapped values pass through; base types get wrapped.
    if isinstance(obj, (NDArrayWithSchema, SeriesWithSchema, DataFrameWithSchema)):
        wrapped = obj
    elif isinstance(obj, np.ndarray):
        wrapped = obj.view(NDArrayWithSchema)
    elif isinstance(obj, pd.Series):
        wrapped = SeriesWithSchema(obj)
    elif isinstance(obj, pd.DataFrame):
        wrapped = DataFrameWithSchema(obj)
    elif is_list_tensor(obj):
        obj = np.array(obj)
        wrapped = obj.view(NDArrayWithSchema)
    elif raise_on_failure:
        raise ValueError(f"unexpected type(obj) {type(obj)}")
    else:
        return obj
    if recalc:
        setattr(wrapped, "json_schema", None)
    if getattr(wrapped, "json_schema", None) is None:
        if schema is None:
            setattr(wrapped, "json_schema", to_schema(obj))
        else:
            lale.type_checking.validate_is_schema(schema)
            setattr(wrapped, "json_schema", schema)
    return wrapped
def add_schema_adjusting_n_rows(obj, schema):
    """Attach *schema* to *obj* after pinning minItems/maxItems to obj's row count.

    :param obj: numpy array, DataFrame or Series (asserted)
    :param schema: an ``array``-typed JSON schema (asserted)
    """
    assert isinstance(obj, (np.ndarray, pd.DataFrame, pd.Series)), type(obj)
    assert schema.get("type", None) == "array", schema
    n_rows = obj.shape[0]
    adjusted = {**schema, "minItems": n_rows, "maxItems": n_rows}
    return add_schema(obj, adjusted)
def add_table_name(obj, name) -> Any:
    """Tag *obj* with a table name, wrapping it in a metadata-carrying subclass
    when needed.

    Spark DataFrames are aliased on the JVM side; numpy/pandas values are
    wrapped in their ``*WithSchema`` counterparts; groupby objects are tagged
    in place. Raises ValueError for unsupported types.
    """
    if obj is None:
        return None
    if name is None:
        return obj
    if spark_installed and isinstance(obj, pyspark.sql.DataFrame):
        # alias method documentation: https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.alias.html
        # Python class DataFrame with method alias(self, alias): https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
        # Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
        # Scala class DataSet with method as(alias: String): https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
        return obj.alias(name)
    # Values that can hold the attribute directly pass through unchanged.
    if isinstance(
        obj,
        (
            NDArrayWithSchema,
            SeriesWithSchema,
            DataFrameWithSchema,
            pd.core.groupby.DataFrameGroupBy,
            pd.core.groupby.SeriesGroupBy,
        ),
    ):
        tagged = obj
    elif spark_installed and isinstance(obj, pyspark.sql.GroupedData):
        tagged = obj
    elif isinstance(obj, np.ndarray):
        tagged = obj.view(NDArrayWithSchema)
    elif isinstance(obj, pd.Series):
        tagged = SeriesWithSchema(obj)
    elif isinstance(obj, pd.DataFrame):
        tagged = DataFrameWithSchema(obj)
    elif is_list_tensor(obj):
        tagged = np.array(obj).view(NDArrayWithSchema)
    else:
        raise ValueError(f"unexpected type(obj) {type(obj)}")
    setattr(tagged, "table_name", name)
    return tagged
def get_table_name(obj):
    """Return the table name previously attached to *obj*, or None.

    For Spark DataFrames the name is recovered from the JVM-side analyzed
    query plan (a SubqueryAlias created by ``DataFrame.alias``); for wrapped
    numpy/pandas values it is the ``table_name`` attribute.
    """
    if spark_installed and isinstance(obj, pyspark.sql.DataFrame):
        # Python class DataFrame with field self._jdf: https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
        # Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
        # Scala class DataSet with field queryExecution: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
        # Scala fields turn into Java nullary methods
        # Py4J exposes Java methods as Python methods
        # Scala class QueryExecution with field analyzed: LogicalPlan: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
        spark_query = obj._jdf.queryExecution().analyzed()  # type: ignore
        try:
            # calling spark_df.explain("extended") shows the analyzed contents
            # after spark_df.alias("foo"), analyzed contents should be SubqueryAlias
            # Scala class SuqueryAlias with field identifier: https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
            # str(..) converts the Java string into a Python string
            result = str(spark_query.identifier())
        except py4j.protocol.Py4JError:
            # Plan is not a SubqueryAlias (no alias was set) -> no name.
            result = None
        return result
    if (
        isinstance(
            obj,
            (
                NDArrayWithSchema,
                SeriesWithSchema,
                DataFrameWithSchema,
                pd.core.groupby.DataFrameGroupBy,
                pd.core.groupby.SeriesGroupBy,
            ),
        )
        or (spark_installed and isinstance(obj, pyspark.sql.GroupedData))
    ):
        return getattr(obj, "table_name", None)
    return None
def strip_schema(obj):
    """Return a plain copy of *obj* with any schema-carrying subclass removed.

    Unwrapped values are returned unchanged.
    """
    if isinstance(obj, NDArrayWithSchema):
        plain = np.array(obj)
        assert type(plain) == np.ndarray
        return plain
    if isinstance(obj, SeriesWithSchema):
        plain = pd.Series(obj)
        assert type(plain) == pd.Series
        return plain
    if isinstance(obj, DataFrameWithSchema):
        plain = pd.DataFrame(obj)
        assert type(plain) == pd.DataFrame
        return plain
    return obj
def dtype_to_schema(typ) -> JSON_TYPE:
    """Map a numpy dtype (or plain Python scalar type) to a JSON schema fragment.

    Branch order matters: bool is tested before the integer kinds, and
    structured / sub-array dtypes are handled before the object fallback.
    """
    schema: JSON_TYPE
    if typ is bool or np.issubdtype(typ, np.bool_):
        schema = {"type": "boolean"}
    elif np.issubdtype(typ, np.unsignedinteger):
        schema = {"type": "integer", "minimum": 0}
    elif np.issubdtype(typ, np.integer):
        schema = {"type": "integer"}
    elif np.issubdtype(typ, np.number):
        schema = {"type": "number"}
    elif np.issubdtype(typ, np.string_) or np.issubdtype(typ, np.unicode_):
        schema = {"type": "string"}
    elif isinstance(typ, np.dtype):
        if typ.fields:
            # Structured dtype: one property per named field.
            props = {k: dtype_to_schema(t) for k, t in typ.fields.items()}
            schema = {"type": "object", "properties": props}
        elif typ.shape:
            # Sub-array dtype: delegate to the shape-aware helper.
            schema = shape_and_dtype_to_schema(typ.shape, typ.subdtype)
        elif np.issubdtype(typ, np.object_):
            # Heterogeneous object columns are represented as strings.
            schema = {"type": "string"}
        else:
            assert False, f"unexpected dtype {typ}"
    else:
        assert False, f"unexpected non-dtype {typ}"
    lale.type_checking.validate_is_schema(schema)
    return schema
def shape_and_dtype_to_schema(shape, dtype) -> JSON_TYPE:
    """Wrap the schema for *dtype* in one fixed-length array level per axis of *shape*.

    Axes are applied innermost-first so the outermost array corresponds to the
    first dimension.
    """
    schema = dtype_to_schema(dtype)
    for extent in reversed(shape):
        schema = {"type": "array", "minItems": extent, "maxItems": extent, "items": schema}
    lale.type_checking.validate_is_schema(schema)
    return schema
def list_tensor_to_shape_and_dtype(ls) -> Optional[Tuple[Tuple[int, ...], Type]]:
    """Infer the (shape, dtype) of a regularly nested list "tensor".

    Scalars (int/float/str) have shape (). A list is a tensor only when every
    element is itself a tensor with the same shape and dtype; ragged or mixed
    lists yield None. The empty list is treated as shape (0,) with int dtype.
    """
    if isinstance(ls, (int, float, str)):
        return (), type(ls)
    if not isinstance(ls, list):
        return None
    common: Any = None
    for element in ls:
        inferred = list_tensor_to_shape_and_dtype(element)
        if inferred is None:
            return None
        if common is None:
            common = inferred
        elif common != inferred:
            # Elements disagree on shape or dtype -> not a regular tensor.
            return None
    if common is None:
        # Empty list: by convention a 1-D, length-0 tensor of ints.
        return (0,), int
    inner_shape, inner_dtype = common
    return (len(ls),) + inner_shape, inner_dtype
def is_list_tensor(obj) -> bool:
    """True when *obj* is a regularly nested Python list (a list "tensor")."""
    return isinstance(obj, list) and list_tensor_to_shape_and_dtype(obj) is not None
def list_tensor_to_schema(ls) -> Optional[JSON_TYPE]:
    """Schema for a nested-list tensor, or None when *ls* is not one."""
    inferred = list_tensor_to_shape_and_dtype(ls)
    if inferred is None:
        return None
    shape, dtype = inferred
    return shape_and_dtype_to_schema(shape, dtype)
def ndarray_to_schema(array) -> JSON_TYPE:
    """Schema for a numpy array; reuses a cached schema when one is attached."""
    assert isinstance(array, np.ndarray)
    if isinstance(array, NDArrayWithSchema):
        cached = getattr(array, "json_schema", None)
        if cached is not None:
            return cached
    return shape_and_dtype_to_schema(array.shape, array.dtype)
def csr_matrix_to_schema(matrix) -> JSON_TYPE:
    """Schema for a scipy CSR matrix, marked sparse via the `isSparse` key."""
    assert isinstance(matrix, scipy.sparse.csr_matrix)
    schema = shape_and_dtype_to_schema(matrix.shape, matrix.dtype)
    schema["isSparse"] = {}  # the empty schema is "true": any value allowed
    return schema
def dataframe_to_schema(df) -> JSON_TYPE:
    """Schema for a pandas DataFrame; reuses a cached schema when one is attached.

    Rows map to the outer array, columns to an inner tuple-style array whose
    per-position schemas carry the column name as `description`.
    """
    assert isinstance(df, pd.DataFrame)
    if isinstance(df, DataFrameWithSchema):
        cached = getattr(df, "json_schema", None)
        if cached is not None:
            return cached
    n_rows, n_columns = df.shape
    assert n_columns == len(df.columns) and n_columns == len(df.dtypes)
    column_schemas = [
        {"description": str(col), **dtype_to_schema(df.dtypes[col])}
        for col in df.columns
    ]
    row_schema = {
        "type": "array",
        "minItems": n_columns,
        "maxItems": n_columns,
        "items": column_schemas,
    }
    result = {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": row_schema,
    }
    lale.type_checking.validate_is_schema(result)
    return result
def series_to_schema(series) -> JSON_TYPE:
    """Schema for a pandas Series; reuses a cached schema when one is attached."""
    assert isinstance(series, pd.Series)
    if isinstance(series, SeriesWithSchema):
        cached = getattr(series, "json_schema", None)
        if cached is not None:
            return cached
    (n_rows,) = series.shape
    result = {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": {"description": str(series.name), **dtype_to_schema(series.dtype)},
    }
    lale.type_checking.validate_is_schema(result)
    return result
def torch_tensor_to_schema(tensor) -> JSON_TYPE:
    """Schema for a torch Tensor: a scalar-type schema nested one array level
    per dimension (outermost array = first dimension)."""
    assert torch_installed, """Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
    assert isinstance(tensor, torch.Tensor)
    # https://pytorch.org/docs/stable/tensor_attributes.html#torch-dtype
    base: JSON_TYPE
    if tensor.dtype == torch.bool:
        base = {"type": "boolean"}
    elif tensor.dtype == torch.uint8:
        base = {"type": "integer", "minimum": 0, "maximum": 255}
    elif torch.is_floating_point(tensor):
        base = {"type": "number"}
    else:
        base = {"type": "integer"}
    schema = base
    for extent in reversed(tensor.shape):
        schema = {"type": "array", "minItems": extent, "maxItems": extent, "items": schema}
    return schema
def is_liac_arff(obj) -> bool:
    """True when *obj* looks like a liac-arff dictionary: a dict carrying
    `description`/`relation` strings and `attributes`/`data` lists."""
    if not isinstance(obj, dict):
        return False
    required = {
        "description": str,
        "relation": str,
        "attributes": list,
        "data": list,
    }
    return all(
        key in obj and isinstance(obj[key], expected)
        for key, expected in required.items()
    )
def liac_arff_to_schema(larff) -> JSON_TYPE:
    """Schema for a liac-arff dictionary: rows as the outer array, attributes
    as an inner tuple-style array with per-position schemas."""
    assert is_liac_arff(
        larff
    ), """Your Python environment might contain an 'arff' package different from 'liac-arff'. You can install it with
pip install 'liac-arff>=2.4.0'
or with
pip install 'lale[full]'"""
    n_rows = len(larff["data"])
    n_columns = len(larff["attributes"])

    def attr_type_to_schema(arff_type) -> JSON_TYPE:
        # Primitive attributes arrive as a string tag; nominal attributes as a
        # list of allowed categories.
        if isinstance(arff_type, str):
            tag_map = {
                "numeric": "number",
                "real": "number",
                "integer": "integer",
                "string": "string",
            }
            return {"type": tag_map[arff_type.lower()]}
        assert isinstance(arff_type, list)
        return {"enum": [*arff_type]}

    column_schemas = [
        {"description": attr[0], **attr_type_to_schema(attr[1])}
        for attr in larff["attributes"]
    ]
    result = {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": {
            "type": "array",
            "minItems": n_columns,
            "maxItems": n_columns,
            "items": column_schemas,
        },
    }
    lale.type_checking.validate_is_schema(result)
    return result
def to_schema(obj) -> JSON_TYPE:
    """Compute (or pass through) the JSON schema describing *obj*.

    Dispatches on the value's type; the branch order matters because several
    checks overlap (e.g. an ndarray could also satisfy is_schema's dict test
    in other branches' positions). Raises ValueError for unsupported types.
    """
    schema = None
    if obj is None:
        schema = {"enum": [None]}
    elif isinstance(obj, np.ndarray):
        schema = ndarray_to_schema(obj)
    elif isinstance(obj, scipy.sparse.csr_matrix):
        schema = csr_matrix_to_schema(obj)
    elif isinstance(obj, pd.DataFrame):
        schema = dataframe_to_schema(obj)
    elif isinstance(obj, pd.Series):
        schema = series_to_schema(obj)
    elif torch_installed and isinstance(obj, torch.Tensor):
        schema = torch_tensor_to_schema(obj)
    elif is_liac_arff(obj):
        schema = liac_arff_to_schema(obj)
    elif lale.type_checking.is_schema(obj):
        schema = obj
    elif isinstance(obj, list):
        schema = list_tensor_to_schema(obj)
    elif _is_spark_df(obj):
        # Spark frames are converted via pandas for schema purposes.
        schema = dataframe_to_schema(obj.toPandas())
    if schema is None:
        raise ValueError(f"to_schema(obj), type {type(obj)}, value {obj}")
    lale.type_checking.validate_is_schema(schema)
    return schema
| 34.37013 | 205 | 0.648844 |
6feb9a8436d1827f198c673318393283f27c5d04
| 1,610 |
py
|
Python
|
casts/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
casts/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
casts/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
"""
"""
from rest_framework.serializers import ModelSerializer
from events.serializers import EventSerializer
from .models import Cast, CastPhoto, PageSection
class CastSerializer(ModelSerializer):
    """
    Serializer for the casts.Cast model
    """

    # Nested, read-only representation of the cast's upcoming events.
    upcoming_events = EventSerializer(many=True, read_only=True)

    class Meta:
        model = Cast
        fields = (
            "id",
            "name",
            "slug",
            "description",
            "logo",
            "email",
            "created",
            "external_url",
            "facebook_url",
            "twitter_user",
            "instagram_user",
            "managers",
            "members",
            "member_requests",
            "blocked",
            # "future_events",
            "upcoming_events",
        )
        # Membership/management lists and derived fields are maintained
        # server-side, so they are exposed read-only through the API.
        read_only_fields = (
            "slug",
            "created",
            "managers",
            "members",
            "member_requests",
            "blocked",
            # "future_events",
            "upcoming_events",
        )
class PageSectionSerializer(ModelSerializer):
    """
    A serializer for the casts.PageSection model
    """

    class Meta:
        model = PageSection
        fields = ("id", "cast", "title", "text", "order", "created")
        # The owning cast and the creation timestamp are set server-side.
        read_only_fields = ("cast", "created")
class CastPhotoSerializer(ModelSerializer):
    """
    A serializer for the casts.CastPhoto model
    """

    class Meta:
        model = CastPhoto
        fields = ("id", "cast", "image", "description", "created")
        # Cast, the uploaded image, and the timestamp are fixed after creation;
        # only the description is writable through this serializer.
        read_only_fields = ("cast", "image", "created")
| 23.333333 | 68 | 0.524224 |
7376490355ade9945afe24897f2f730ecddf4cad
| 3,556 |
py
|
Python
|
easytrader/ricequant_follower.py
|
izhangxm/easytrader
|
b4815c16fe99d43f4c131a7950a178d892c4c8f3
|
[
"MIT"
] | 15 |
2018-05-16T02:39:01.000Z
|
2021-05-22T13:12:55.000Z
|
autologin/ricequant_follower.py
|
pchaos/wanggejiaoyi
|
60242d465bf10d4be46ee6eafc99557affc2a52e
|
[
"MIT"
] | null | null | null |
autologin/ricequant_follower.py
|
pchaos/wanggejiaoyi
|
60242d465bf10d4be46ee6eafc99557affc2a52e
|
[
"MIT"
] | 9 |
2018-05-16T00:47:34.000Z
|
2021-11-26T05:39:48.000Z
|
# coding:utf8
from __future__ import unicode_literals
from datetime import datetime
from threading import Thread
from .follower import BaseFollower
from .log import log
class RiceQuantFollower(BaseFollower):
    """Follower that mirrors trades from RiceQuant paper-trading runs."""

    def login(self, user, password, **kwargs):
        # Lazy import so rqopen_client is required only when this follower is used.
        from rqopen_client import RQOpenClient
        self.client = RQOpenClient(user, password, logger=log)

    def follow(self, users, run_id, track_interval=1,
               trade_cmd_expire_seconds=120, cmd_cache=True, entrust_prop='limit', send_interval=0):
        """Track RiceQuant paper-trading runs; supports multiple users and strategies.

        :param users: easytrader user object(s); pass a list to follow with multiple users
        :param run_id: RiceQuant paper-trading run id; pass a list for multiple runs
        :param track_interval: polling interval of the paper-trading run, in seconds
        :param trade_cmd_expire_seconds: expiry time of a trade command, in seconds
        :param cmd_cache: whether to load the cache of previously executed commands,
            preventing already-executed trades from re-running after a restart
        :param entrust_prop: order type, 'limit' or 'market' (market only implemented
            for the yinhe backend)
        :param send_interval: delay between sent trades, default 0s; increase it to
            avoid insufficient funds when a buy is issued before the preceding sell
            has filled
        """
        users = self.warp_list(users)
        run_id_list = self.warp_list(run_id)
        if cmd_cache:
            self.load_expired_cmd_cache()
        self.start_trader_thread(users, trade_cmd_expire_seconds, entrust_prop, send_interval)
        workers = []
        # One daemon-less tracking thread per followed run.
        for run_id in run_id_list:
            strategy_name = self.extract_strategy_name(run_id)
            strategy_worker = Thread(target=self.track_strategy_worker, args=[run_id, strategy_name],
                                     kwargs={'interval': track_interval})
            strategy_worker.start()
            workers.append(strategy_worker)
            log.info('开始跟踪策略: {}'.format(strategy_name))
        for worker in workers:
            worker.join()

    def extract_strategy_name(self, run_id):
        # Fetch the run's positions just to read its display name; a non-200
        # response code is treated as fatal.
        ret_json = self.client.get_positions(run_id)
        if ret_json["code"] != 200:
            log.error("fetch data from run_id {} fail, msg {}".format(run_id, ret_json["msg"]))
            raise RuntimeError(ret_json["msg"])
        return ret_json["resp"]["name"]

    def extract_day_trades(self, run_id):
        # Today's fills for the given run; a non-200 response code is fatal.
        ret_json = self.client.get_day_trades(run_id)
        if ret_json["code"] != 200:
            log.error("fetch day trades from run_id {} fail, msg {}".format(run_id, ret_json["msg"]))
            raise RuntimeError(ret_json["msg"])
        return ret_json["resp"]["trades"]

    def query_strategy_transaction(self, strategy, **kwargs):
        """Fetch today's fills, convert to easytrader format, sells first."""
        transactions = self.extract_day_trades(strategy)
        transactions = self.project_transactions(transactions, **kwargs)
        return self.order_transactions_sell_first(transactions)

    @staticmethod
    def stock_shuffle_to_prefix(stock):
        """Convert '123456.XSHG'/'123456.XSHE' codes to 'sh123456'/'sz123456'."""
        assert len(stock) == 11, 'stock {} must like 123456.XSHG or 123456.XSHE'.format(stock)
        code = stock[:6]
        if stock.find('XSHG') != -1:
            return 'sh' + code
        elif stock.find('XSHE') != -1:
            return 'sz' + code
        raise TypeError('not valid stock code: {}'.format(code))

    def project_transactions(self, transactions, **kwargs):
        """Map RiceQuant trade dicts into easytrader's transaction format."""
        new_transactions = []
        for t in transactions:
            trans = {}
            trans["price"] = t["price"]
            # quantity is signed (buy > 0, sell < 0); amount is always positive
            trans["amount"] = int(abs(t["quantity"]))
            trans["datetime"] = datetime.strptime(t["time"], '%Y-%m-%d %H:%M:%S')
            trans["stock_code"] = self.stock_shuffle_to_prefix(t["order_book_id"])
            trans["action"] = 'buy' if t["quantity"] > 0 else 'sell'
            new_transactions.append(trans)
        return new_transactions
| 40.873563 | 101 | 0.639483 |
73f39e888b953d9abcef698f11b27405385d9468
| 797 |
py
|
Python
|
nz_crawl_demo/day2/requests/youdao.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
nz_crawl_demo/day2/requests/youdao.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
nz_crawl_demo/day2/requests/youdao.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
# Script: translate user input via Youdao's (undocumented) web translate endpoint.
import requests
import json

headers = {
    'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0"
}
# The "_o" suffix must be removed from the endpoint path: "translate_o" enforces
# the anti-crawler sign check, plain "translate" does not.
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule"
f = input('请输入要翻译的内容:')
# Language codes observed in the web client: en / zh-CHS / AUTO
data = {
    "i":f,
    "version":"2.1",
    "ts":"1583828597456",  # NOTE(review): hard-coded timestamp captured from one session
    "to":" zh-CHS",  # NOTE(review): leading space looks accidental — verify the API tolerates it
    "from":"AUTO",
    "smartresult":"AUTO",
    "client":"fanyideskweb",
    "salt":"15838292461802",  # NOTE(review): salt/sign/bv are captured constants and may expire
    "sign":"d179ebf9e387480bbed0e48c163147e0",
    "bv":"a9c3483a52d7863608142cc3f302a0ba",
    "doctype":"json",
    "keyfrom":"fanyi.web",
    "action":"FY_BY_CLICKBUTTION",
}
res = requests.post(url,data=data,headers=headers)
# print(res.text)
# dic = json.loads(res.text)
dic = res.json()  # response body is JSON; .json() parses it directly
tgt = dic['translateResult'][0][0]['tgt']  # first candidate of the first sentence
print(tgt)
| 22.771429 | 91 | 0.646173 |
fb9ff1dd029299b8da9dcbf6d6b7b19daf046a96
| 138 |
py
|
Python
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
    # @param {integer} n
    # @return {boolean}
    def isPowerOfTwo(self, n):
        """Return True iff n is a positive power of two.

        A power of two has exactly one bit set, so ``n & (n - 1)`` clears the
        lowest set bit and yields 0 only for powers of two; the ``n > 0``
        guard rejects zero and negatives.
        """
        # Bug fix: the original used `n & n - 1 is 0`, an identity comparison
        # that only works because CPython caches small ints (and is a
        # SyntaxWarning on modern Python). Use == for value equality.
        return n > 0 and (n & (n - 1)) == 0
| 23 | 41 | 0.550725 |
e3d7b7511698b506cbfd22bdcaca42221de3137a
| 609 |
py
|
Python
|
Source/07_TCP/client.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | 1 |
2022-02-28T09:49:35.000Z
|
2022-02-28T09:49:35.000Z
|
Source/07_TCP/client.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | null | null | null |
Source/07_TCP/client.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | null | null | null |
import socket

print("Client Side TCP")
print("~" * 30)

# Create Socket
# AF_INET: IPV4
# SOCK_STREAM : TCP
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

ip = "localhost"  # server host to connect to
port = 20000

# 1. Connect with the server
s.connect((ip, port))

try:
    while True:
        message = input("Enter your message:")
        # Bug fix: send() may transmit only part of the buffer; sendall()
        # retries until every byte is written.
        s.sendall(message.encode())

        # wait for the answer from the server
        answer = s.recv(1024)
        print(f"[{ip}] {answer.decode()}")
except Exception as exc:
    # Bug fix: the original bare `except:` also swallowed KeyboardInterrupt
    # and hid the actual failure; catch Exception and report what happened.
    print(f"Something went wrong: {exc}")
finally:
    # Always release the socket, even on error or Ctrl+C.
    s.close()
| 14.853659 | 74 | 0.6289 |
5497384233fe42ca473e8ff237ccc876bfc3345c
| 2,839 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/accounts/report/asset_depreciation_ledger/asset_depreciation_ledger.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/accounts/report/asset_depreciation_ledger/asset_depreciation_ledger.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/accounts/report/asset_depreciation_ledger/asset_depreciation_ledger.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Report entry point: return the column definitions and the ledger rows."""
	return get_columns(), get_data(filters)
def get_data(filters):
	"""Fetch posted depreciation-schedule rows for the report.

	Joins Asset with its Depreciation Schedule children, restricted to
	submitted assets whose depreciation entry has been booked
	(journal_entry set) within the filtered date range and company.
	Optional asset / asset_category filters are appended via
	get_filter_conditions.
	"""
	data = frappe.db.sql("""
		select
			a.name as asset, a.asset_category, a.status,
			a.depreciation_method, a.purchase_date, a.gross_purchase_amount,
			ds.schedule_date as depreciation_date, ds.depreciation_amount,
			ds.accumulated_depreciation_amount,
			(a.gross_purchase_amount - ds.accumulated_depreciation_amount) as amount_after_depreciation,
			ds.journal_entry as depreciation_entry
		from
			`tabAsset` a, `tabDepreciation Schedule` ds
		where
			a.name = ds.parent
			and a.docstatus=1
			and ifnull(ds.journal_entry, '') != ''
			and ds.schedule_date between %(from_date)s and %(to_date)s
			and a.company = %(company)s
			{conditions}
		order by
			a.name asc, ds.schedule_date asc
	""".format(conditions=get_filter_conditions(filters)), filters, as_dict=1)
	return data
def get_filter_conditions(filters):
	"""Build the optional SQL WHERE fragments for asset / asset_category.

	Values are referenced as %(...)s placeholders, so the actual filter
	values are bound by the database driver, not interpolated here.
	"""
	clauses = []
	if filters.get("asset"):
		clauses.append(" and a.name = %(asset)s")
	if filters.get("asset_category"):
		clauses.append(" and a.asset_category = %(asset_category)s")
	return "".join(clauses)
def get_columns():
	"""Column definitions for the Asset Depreciation Ledger report.

	Built from a compact (label, fieldname, fieldtype, options, width) spec;
	`options` is None for non-Link fields and, when present, is inserted
	before `width` to keep the original key order of each column dict.
	"""
	specs = [
		("Asset", "asset", "Link", "Asset", 120),
		("Depreciation Date", "depreciation_date", "Date", None, 120),
		("Purchase Amount", "gross_purchase_amount", "Currency", None, 120),
		("Depreciation Amount", "depreciation_amount", "Currency", None, 140),
		("Accumulated Depreciation Amount", "accumulated_depreciation_amount", "Currency", None, 210),
		("Amount After Depreciation", "amount_after_depreciation", "Currency", None, 180),
		("Depreciation Entry", "depreciation_entry", "Link", "Journal Entry", 140),
		("Asset Category", "asset_category", "Link", "Asset Category", 120),
		("Current Status", "status", "Data", None, 120),
		("Depreciation Method", "depreciation_method", "Data", None, 130),
		("Purchase Date", "purchase_date", "Date", None, 120),
	]
	columns = []
	for label, fieldname, fieldtype, options, width in specs:
		column = {
			"label": _(label),
			"fieldname": fieldname,
			"fieldtype": fieldtype,
		}
		if options is not None:
			column["options"] = options
		column["width"] = width
		columns.append(column)
	return columns
| 23.857143 | 95 | 0.653047 |
15100879c003422f43d6a8e73e80e3c5e89eb0b3
| 369 |
py
|
Python
|
Python-programming-1/alpha_rearrange.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
Python-programming-1/alpha_rearrange.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
Python-programming-1/alpha_rearrange.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | 1 |
2020-09-30T18:53:05.000Z
|
2020-09-30T18:53:05.000Z
|
# Program to Rearrange the Entered Lower Case Alphabets in their Correct Arrangement
a = int(input("Enter Number of Alphabets:-"))
b = []
for i in range(a):
    b.append(input(f"Enter {i}th Lower Case Alphabet:-"))
# Improvement: single-character strings compare lexicographically, so the
# ord()/chr() round-trip in the original was unnecessary — sort directly.
b.sort()
print("Alphabets in Correct Arrangement:-")
print(b)
| 28.384615 | 85 | 0.639566 |
421fb309e454892b6d80a271e25dba717d26140d
| 8,040 |
py
|
Python
|
official/cv/posenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/posenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/posenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train posenet"""
import ast
import argparse
import os
import shutil
import numpy as np
from mindspore.common import set_seed
from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank
from mindspore.nn import Adagrad
from mindspore.train.model import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
import mindspore.common.dtype as ms
from src.posenet import PoseNet
from src.config import common_config, KingsCollege, StMarysChurch
from src.dataset import create_posenet_dataset
from src.loss import PosenetWithLoss
# Fix all random seeds so training runs are reproducible.
set_seed(1)

# Command-line interface for PoseNet training (ModelArts-oriented).
parser = argparse.ArgumentParser(description='Posenet train.')
parser.add_argument("--run_distribute", type=ast.literal_eval, default=False, help="Run distribute, default is false.")
parser.add_argument('--dataset', type=str, default='KingsCollege',
                    choices=['KingsCollege', 'StMarysChurch'], help='Name of dataset.')
parser.add_argument('--device_num', type=int, default=1, help='Number of device.')
# Model output directory (ModelArts convention).
parser.add_argument('--train_url', type=str, default='', help='the path model saved')
# Dataset directory (ModelArts convention).
parser.add_argument('--data_url', type=str, default='', help='the training data')
# Hyper-parameters extracted from the config for ModelArts tuning.
parser.add_argument('--pre_trained', type=ast.literal_eval, default=False, help='Pretrained checkpoint path')
parser.add_argument('--device_id', type=int, default=None, help='device id of GPU or Ascend. (Default: None)')
parser.add_argument('--device_target', type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
                    help="Device target, support Ascend, GPU and CPU.")
parser.add_argument('--max_steps', type=int, default=30000, help='max_steps')
parser.add_argument('--save_checkpoint_epochs', type=int, default=5, help='save_checkpoint_epochs')
parser.add_argument('--keep_checkpoint_max', type=int, default=10, help='keep_checkpoint_max')
parser.add_argument('--save_checkpoint', type=ast.literal_eval, default=True, help='save_checkpoint')
parser.add_argument("--file_name", type=str, default="posenet", help="output file name.")
parser.add_argument('--is_modelarts', type=ast.literal_eval, default=True, help='Train in Modelarts.')
args_opt = parser.parse_args()

# Local cache layout used on the ModelArts container.
CACHE_TRAINING_URL = "/cache/training/"
CACHE = "/cache/"
# NOTE(review): `src` and `local_data_path` appear unused in this script --
# confirm before removing.
src = "/"
local_data_path = '/cache/data/'
# Make sure the training output directory exists before any callbacks write to it.
if not os.path.isdir(CACHE_TRAINING_URL):
    os.makedirs(CACHE_TRAINING_URL)
if __name__ == '__main__':
    cfg = common_config
    # Select the per-dataset configuration (batch size, lr, paths, ...).
    if args_opt.dataset == "KingsCollege":
        dataset_cfg = KingsCollege
    elif args_opt.dataset == "StMarysChurch":
        dataset_cfg = StMarysChurch

    device_target = args_opt.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)

    # Distributed setup: data-parallel with averaged gradients on either backend.
    if args_opt.run_distribute:
        if device_target == "Ascend":
            # DEVICE_ID is injected by the Ascend launcher.
            device_id = int(os.getenv('DEVICE_ID'))
            context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True,
                                              auto_parallel_search_mode="recursive_programming")
            init()
        elif device_target == "GPU":
            init()
            context.set_auto_parallel_context(device_num=args_opt.device_num,
                                              parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True,
                                              auto_parallel_search_mode="recursive_programming")
    else:
        # Single-device run: explicit --device_id wins over the config default.
        if args_opt.device_id is not None:
            context.set_context(device_id=args_opt.device_id)
        else:
            context.set_context(device_id=cfg.device_id)

    # NOTE(review): train_dataset_path is assigned but the mindrecord path
    # below is built from CACHE instead -- confirm it is still needed.
    train_dataset_path = dataset_cfg.dataset_path
    if args_opt.is_modelarts:
        # Copy the dataset from OBS (data_url) into the local cache.
        import moxing as mox
        mox.file.copy_parallel(args_opt.data_url, CACHE)
    if args_opt.dataset == "KingsCollege":
        mindrecord_file_name = "KingsCollege_posenet_train.mindrecord"
    elif args_opt.dataset == "StMarysChurch":
        mindrecord_file_name = "StMarysChurch_posenet_train.mindrecord"
    mindrecord_file = os.path.join(CACHE, mindrecord_file_name)

    dataset = create_posenet_dataset(mindrecord_file, batch_size=dataset_cfg.batch_size,
                                     device_num=args_opt.device_num, is_training=True)
    step_per_epoch = dataset.get_dataset_size()

    # PoseNet wrapped with its loss; Adagrad per the original training recipe.
    net_with_loss = PosenetWithLoss(args_opt.pre_trained)
    opt = Adagrad(params=net_with_loss.trainable_params(),
                  learning_rate=dataset_cfg.lr_init,
                  weight_decay=dataset_cfg.weight_decay)
    model = Model(net_with_loss, optimizer=opt)

    # Standard monitoring callbacks.
    time_cb = TimeMonitor(data_size=step_per_epoch)
    loss_cb = LossMonitor()
    cb = [time_cb, loss_cb]
    if args_opt.save_checkpoint:
        config_ck = CheckpointConfig(save_checkpoint_steps=args_opt.save_checkpoint_epochs * step_per_epoch,
                                     keep_checkpoint_max=args_opt.keep_checkpoint_max)
        if args_opt.is_modelarts:
            save_checkpoint_path = CACHE_TRAINING_URL
            if args_opt.device_num == 1:
                ckpt_cb = ModelCheckpoint(prefix='train_posenet_' + args_opt.dataset,
                                          directory=save_checkpoint_path,
                                          config=config_ck)
                cb += [ckpt_cb]
            # In multi-device runs only one rank per 8-card server saves checkpoints.
            if args_opt.device_num > 1 and get_rank() % 8 == 0:
                ckpt_cb = ModelCheckpoint(prefix='train_posenet_' + args_opt.dataset,
                                          directory=save_checkpoint_path,
                                          config=config_ck)
                cb += [ckpt_cb]
        else:
            save_checkpoint_path = cfg.checkpoint_dir
            if not os.path.isdir(save_checkpoint_path):
                os.makedirs(save_checkpoint_path)
            if args_opt.device_num == 1:
                ckpt_cb = ModelCheckpoint(prefix='train_posenet_' + args_opt.dataset,
                                          directory=save_checkpoint_path,
                                          config=config_ck)
                cb += [ckpt_cb]
            if args_opt.device_num > 1 and get_rank() % 8 == 0:
                ckpt_cb = ModelCheckpoint(prefix='train_posenet_' + args_opt.dataset,
                                          directory=save_checkpoint_path,
                                          config=config_ck)
                cb += [ckpt_cb]

    # NOTE(review): epoch_size is computed from --max_steps but model.train()
    # below is invoked with a hard-coded 1 epoch -- confirm which is intended.
    epoch_size = args_opt.max_steps // args_opt.device_num // step_per_epoch
    model.train(1, dataset, callbacks=cb)

    # Export the trained network to an AIR file for inference.
    net = PoseNet()
    # NOTE(review): the checkpoint file name is hard-coded to the
    # KingsCollege 1-epoch/16-step naming -- verify it matches other
    # dataset/step combinations.
    file_name1 = "train_posenet_KingsCollege-1_16.ckpt"
    assert cfg.checkpoint_dir is not None, "cfg.checkpoint_dir is None."
    param_dict = load_checkpoint(os.path.join(CACHE_TRAINING_URL, file_name1))
    load_param_into_net(net, param_dict)
    input_arr = Tensor(np.ones([1, 3, 224, 224]), ms.float32)
    export(net, input_arr, file_name=args_opt.file_name, file_format='AIR')
    shutil.copy('posenet.air', CACHE_TRAINING_URL)
    if args_opt.is_modelarts:
        # Push all training artifacts back to OBS (train_url).
        mox.file.copy_parallel(src_url=CACHE_TRAINING_URL, dst_url=args_opt.train_url)
| 50.566038 | 119 | 0.668284 |
42261dc12b7a9989f51b7053daaf95a0d44e1990
| 156 |
py
|
Python
|
perimeterOfRectangle.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
perimeterOfRectangle.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
perimeterOfRectangle.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
def perimeter(base, height):
    """Return the perimeter of a rectangle with the given sides.

    The perimeter is 2 * (base + height).  The previous implementation
    returned 2 * base * height, which is twice the *area*, not the
    perimeter.
    """
    return 2 * (base + height)
# Prompt for the rectangle's dimensions and report the perimeter.
base = int(input("Enter Base: "))
height = int(input("Enter height "))
result = perimeter(base, height)
print(result)
| 26 | 37 | 0.698718 |
422f8fc136e2ef189e11a0cd0fa0ce7385751fb6
| 5,886 |
py
|
Python
|
marsyas-vamp/marsyas/scripts/large-evaluators/make-mf.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/large-evaluators/make-mf.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/large-evaluators/make-mf.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import numpy
import pylab
# Debug toggles.  PLOT is assigned True and then immediately overridden to
# False; swap which assignment comes last to enable the pylab plots below.
PLOT = True
PLOT = False
# When True, 0.5x tempo multiples are also accepted, and the ARFF class
# attribute written below includes 0.5.
INCLUDE_05 = False
#INCLUDE_05 = True
def accuracy(bpm_detected, bpm_ground):
    """MIREX-style tempo check: detected BPM within 4% of the ground truth."""
    return abs(bpm_detected - bpm_ground) <= 0.04 * bpm_ground
def double_harmonic_accuracy(bpm_detected, bpm_ground):
    """Return the multiplier m in {1, 2} for which m * bpm_detected lies
    within 4% of bpm_ground (also 1/m when the module flag INCLUDE_05 is
    set), or 0 when no multiple matches."""
    tolerance = 0.04
    allowed = tolerance * bpm_ground
    for m in [1, 2]:
        if abs(m * bpm_detected - bpm_ground) <= allowed:
            return m
        # Sub-multiples (1/m) are only accepted when INCLUDE_05 is enabled.
        if INCLUDE_05 and abs(1.0 / m * bpm_detected - bpm_ground) <= allowed:
            return 1.0 / m
    return 0
def extended_harmonic_accuracy(bpm_detected, bpm_ground):
    """Return the first factor f in (1, 1/1, 2, 1/2, 3, 1/3) for which
    f * bpm_detected lies within 4% of bpm_ground, or 0 otherwise."""
    tolerance = 0.04
    allowed = tolerance * bpm_ground
    for m in [1, 2, 3]:
        # Check the multiple first, then the corresponding sub-multiple.
        for factor in (m, 1.0 / m):
            if abs(factor * bpm_detected - bpm_ground) <= allowed:
                return factor
    return 0
# Input: a features dump produced upstream; output: a Weka ARFF file.
filename = sys.argv[1]
lines = open(filename).readlines()
out = open("weka.arff", 'w')

INTRO_BEGIN = """@relation mults"""
# Feature names emitted as ARFF attributes; commented entries are
# features that were tried and disabled.
FEATURES = [
    #'above_p1',
    #'below_p1',
    #'above_p2',
    #'below_p2',
    #'above_p3',
    #'below_p3',
    'energy_under',
    'energy_over',
    'energy_residual',
    'str05',
    'str10',
    'str20',
    'str_residual',
    #'strp1',
    #'strp2',
    #'strp3',
    #'strp_residual',
    #'rel1',
    'rel2',
    'rel3',
    #'has2',
    #'has3',
    'num_non_zero',
    #'energyinpeak',
]
# The class attribute depends on whether 0.5x multiples are allowed.
if INCLUDE_05:
    OUTRO = """@attribute heuristic_bpm numeric
@attribute class {0.5,1.0,2.0}
@data
"""
else:
    OUTRO = """@attribute heuristic_bpm numeric
@attribute class {1.0,2.0}
@data
"""
#OUTRO = """@attribute class {0.5,1.0,2.0}
#@attribute class {0.5,1.0,2.0}
#@attribute class {1.0,2.0}
#@attribute class {1.0,2.0,1.5}
#@attribute class {0.25,0.333333333333,0.5,0.666666666667,1.0,1.5,2.0,3.0,4.0}
# Assemble the full ARFF header: relation, one attribute per feature, class.
INTRO = "%s\n%s\n%s" % (
    INTRO_BEGIN,
    '\n'.join(["@attribute bp-%s numeric" % x for x in FEATURES]),
    OUTRO
)
#@attribute 19 numeric
out.write(INTRO)

# Counters and accumulators filled by the main loop below.
harmonics = 0
cause_problems = 0
total = 0
multsdict = {}
grounds = []
failures = []
minmax = []
# Main pass over the dump: "features_orig:" lines carry the raw feature
# vector plus detected/ground-truth BPM; the following
# "features_normalized:" line is the one written to the ARFF file.
for line in lines:
    sl = line.rstrip().split("\t")
    if line.startswith("features_orig:"):
        # Remember detected/ground BPM and collect the raw vector for the
        # min/max statistics printed at the end.
        detected = float(sl[-2])
        ground_truth = float(sl[-1])
        vec = numpy.zeros(len(sl)-2)
        for i in range(1,len(sl)-1):
            vec[i-1] = float(sl[i])
        minmax.append(vec)
    #if not line.startswith("features_orig:"):
    if not line.startswith("features_normalized:"):
        continue
    #detected_norm = float(sl[-2])
    grounds.append(ground_truth)
    # mirex: strict 4% accuracy; harmonic: accuracy up to 3x/(1/3x) multiples.
    mirex = False
    harmonic = False
    if accuracy(detected, ground_truth):
        mirex = True
    if extended_harmonic_accuracy(detected, ground_truth):
        harmonic = True
    # Class label: which multiple (1 or 2, optionally 0.5) maps detected
    # onto the ground truth.
    mult = double_harmonic_accuracy(detected, ground_truth)
    #mult = extended_harmonic_accuracy(detected, ground_truth)
    try:
        multsdict[mult] += 1
    except:
        multsdict[mult] = 1
    if mult > 0:
        # Sanity check: the returned multiple must actually satisfy the
        # strict accuracy test, otherwise abort.
        if not accuracy(mult*detected, ground_truth):
            print "failure"
            print detected, ground_truth, mult
            exit(1)
    else:
        # Fall back to the looser extended test to rescue 2x (and 0.5x)
        # cases that the double-harmonic check missed.
        if extended_harmonic_accuracy(2.0*detected, ground_truth):
            mult = 2.0
            #print "extra 2"
        elif INCLUDE_05:
            if extended_harmonic_accuracy(0.5*detected, ground_truth):
                mult = 0.5
                #print "extra 0.5"
    #if accuracy(detected, ground_truth):
    #    mult = 1.0
    #else:
    #    if accuracy(2*detected, ground_truth):
    #        mult = 2.0
    #if extended_harmonic_accuracy(detected, ground_truth):
    if mult > 0:
        harmonics += 1
        #if extended_harmonic_accuracy(mult*detected, ground_truth) == 0:
        if accuracy(mult*detected, ground_truth) == 0:
            # Rescued mult passes the extended test but not strict accuracy.
            cause_problems += 1
            # don't multipy value; penalize MIREX but keep HARMONIC
            mult = 1.0
    total += 1
    # Build the ARFF row: all feature columns, with mult as the class value.
    vec = numpy.zeros(len(sl)-1+1-1)
    for i in range(1,len(sl)-1):
        vec[i-1] = float(sl[i])
    vec[-1] = mult
    #vec[0] = detected
    div = detected / ground_truth
    if mult > 0:
        text = ",".join( [str(v) for v in vec] )
        out.write(text+"\n")
    else:
        # No usable multiple: keep the detected/ground ratio for inspection.
        failures.append(div)
    if PLOT:
        #if mirex:
        #    pylab.plot(div, 'go')
        #elif harmonic:
        #    pylab.plot(div, 'go')
        #else:
        #    pylab.plot(div, 'ro')
        # Green: strict accuracy; blue: harmonic only; red: miss.
        if mirex:
            pylab.plot(ground_truth, detected, 'g.')
        elif harmonic:
            pylab.plot(ground_truth, detected, 'b.')
        else:
            pylab.plot(ground_truth, detected, 'ro')
print "Harmonic accuracy:\t%i / %i (%.3f)" % (
harmonics, total, float(harmonics)/total)
print "New mults cause problems for:\t%i" % cause_problems
print multsdict
lowbound = 35
highbound = 6*35
lowbound = 40
highbound = 180
lows = 0
highs = 0
for g in grounds:
if g < lowbound:
lows += 1
if g > highbound:
highs += 1
print "With bounds of %i - %i, we miss %i - %i (sum %i) out of %i (%.3f%%)" % (
lowbound, highbound, lows, highs, lows+highs, total,
float(lows+highs)/total
)
print "min/max ground truths:\t", min(grounds), max(grounds)
if PLOT:
pylab.xlabel("ground")
pylab.ylabel("detected")
pylab.figure()
pylab.hist(grounds, bins=100)
pylab.show()
#pylab.hist(failures, bins=200)
#pylab.show()
vals = numpy.array(minmax)
mins = vals.min(axis=0)
maxs = vals.max(axis=0)
print " const mrs_real mins[] = {",
for m in mins:
print str(m) + ",",
print "0 };"
print " const mrs_real maxs[] = {",
for m in maxs:
print str(m) + ",",
print "0 };"
print "num features:\t", len(mins)
| 23.173228 | 79 | 0.583248 |
428d45451783ca43ac3bfe544ab705e36f87eae5
| 2,784 |
py
|
Python
|
official/cv/lenet_quant/export_bin_file.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
official/cv/lenet_quant/export_bin_file.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
official/cv/lenet_quant/export_bin_file.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
export mnist dataset to bin.
"""
import os
import argparse
from mindspore import context
from src.dataset import create_dataset
def parse_args():
    """Parse the command-line flags for the MNIST-to-bin exporter.

    Unknown arguments are tolerated (parse_known_args) so the script can
    run under launchers that inject extra flags.
    """
    parser = argparse.ArgumentParser(description='MNIST to bin')
    # (flags, keyword arguments) for every supported option, registered in order.
    options = [
        (('--device_target',), dict(type=str, default="Ascend",
                                    choices=['Ascend', 'GPU'],
                                    help='device where the code will be implemented (default: Ascend)')),
        (('--dataset_dir',), dict(type=str, default='', help='dataset path')),
        (('--save_dir',), dict(type=str, default='', help='path to save bin file')),
        (('--batch_size',), dict(type=int, default=1, help='batch size for bin')),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    parsed, _ = parser.parse_known_args()
    return parsed
if __name__ == "__main__":
    args = parse_args()
    # Force single-device execution regardless of the launch environment.
    os.environ["RANK_SIZE"] = '1'
    os.environ["RANK_ID"] = '0'
    device_id = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=device_id)
    # The MNIST test split is expected under <dataset_dir>/test.
    mnist_path = os.path.join(args.dataset_dir, 'test')
    batch_size = args.batch_size
    save_dir = os.path.join(args.save_dir, 'lenet_quant_mnist_310_infer_data')
    # One .bin file per batch goes into this folder.
    folder = os.path.join(save_dir, 'mnist_bs_' + str(batch_size) + '_bin')
    if not os.path.exists(folder):
        os.makedirs(folder)
    ds = create_dataset(mnist_path, batch_size)
    iter_num = 0
    # Companion label file: one line per batch, "<bin name>,<label>,<label>,...".
    label_file = os.path.join(save_dir, './mnist_bs_' + str(batch_size) + '_label.txt')
    with open(label_file, 'w') as f:
        for data in ds.create_dict_iterator():
            image = data['image']
            label = data['label']
            file_name = "mnist_" + str(iter_num) + ".bin"
            file_path = folder + "/" + file_name
            # Dump the raw image tensor for 310 inference.
            image.asnumpy().tofile(file_path)
            f.write(file_name)
            for i in label:
                f.write(',' + str(i))
            f.write('\n')
            iter_num += 1
    # Final summary; image/label still refer to the last batch processed.
    print("=====iter_num:{}=====".format(iter_num))
    print("=====image_data:{}=====".format(image))
    print("=====label_data:{}=====".format(label))
674b9ddc42f1ff3d1c670e0d4aa4a409c115505a
| 1,268 |
py
|
Python
|
huTools/bank.py
|
gadventures/huTools
|
8bc58d63491bcd3cfc3e78d219be703d943ffeb5
|
[
"BSD-3-Clause"
] | null | null | null |
huTools/bank.py
|
gadventures/huTools
|
8bc58d63491bcd3cfc3e78d219be703d943ffeb5
|
[
"BSD-3-Clause"
] | null | null | null |
huTools/bank.py
|
gadventures/huTools
|
8bc58d63491bcd3cfc3e78d219be703d943ffeb5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
bank.py
Created by Christian Klein on 2010-03-25.
Copyright (c) 2010 Christian Klein. All rights reserved.
"""
from __future__ import unicode_literals
from builtins import str
from string import ascii_uppercase
def convert_character(string):
    """Convert letters in an IBAN to their numeric equivalents.

    Digits pass through unchanged; upper-case letters map to two-digit
    numbers A=10 ... Z=35, as required by the MOD 97-10 check.
    """
    return "".join(
        ch if ch.isdigit() else str(ascii_uppercase.index(ch) + 10)
        for ch in string
    )
def iban(ktonr, blz, land="DE"):
    """Compute the International Bank Account Number for a German account.

    The Basic Bank Account Number (BBAN) is built from the 8-digit bank
    code (blz) and the zero-padded 10-digit account number (ktonr).
    The country code plus "00" is appended, letters are converted to
    numbers, and the two check digits are derived via MOD 97-10
    (DIN ISO 7064).
    """
    bban = "%08d%010d" % (int(blz), int(ktonr))
    numeric = convert_character("%s%s00" % (bban, land))
    check_digits = 98 - (int(numeric) % 97)
    return "%s%02d%s" % (land, check_digits, bban)
def check_iban(value):
    """Validate an IBAN: move the first four characters to the end,
    convert letters to numbers, and check that the MOD 97 remainder is 1."""
    rearranged = value[4:] + value[:4]
    return int(convert_character(rearranged)) % 97 == 1
18fe11589d98cbb919f494bc680420c8f37b8e95
| 2,633 |
py
|
Python
|
jordamach/linear_regression.py
|
lacriment/Jordamach
|
a17e22a48e4b3cb66c3f20a6c706b37dcbb56633
|
[
"MIT"
] | null | null | null |
jordamach/linear_regression.py
|
lacriment/Jordamach
|
a17e22a48e4b3cb66c3f20a6c706b37dcbb56633
|
[
"MIT"
] | null | null | null |
jordamach/linear_regression.py
|
lacriment/Jordamach
|
a17e22a48e4b3cb66c3f20a6c706b37dcbb56633
|
[
"MIT"
] | null | null | null |
from math import log
import numpy as np
class Regrezio:
    """Base class for linear regression models.

    Fits y on x by ordinary least squares via the normal equations
    b = (X'X)^-1 X'Y.  The `model` name selects an optional functional
    form applied during fit(): 'lin'/'lin-lin' (identity), 'log-lin'
    (ln y), 'log-log' (ln y and ln x), 'lin-log' (ln x), or 'quadratic'
    (appends an x**2 column).  x and y are expected as 2-D column data
    (lists of rows) -- TODO confirm against callers.
    """

    def __init__(self, model='lin'):
        # set the regression model (normalized to lower case)
        self.model = model.lower()

    def fit(self, x, y):
        """ Fits values to linear regression model and calculates
        coefficients and intercept """
        self.y = np.array(y)
        self.x = np.array(x)
        # Calculate the lns of the values for function models
        if self.model == 'log-lin':
            self.y = np.array([[log(i[0])] for i in self.y])
        elif self.model == 'log-log':
            self.y = np.array([[log(i[0])] for i in self.y])
            self.x = np.array([[log(cell) for cell in row] for row in self.x])
        elif self.model == 'lin-log':
            self.x = np.array([[log(cell) for cell in row] for row in self.x])
        elif self.model == 'quadratic':
            # single-regressor input assumed: append its square as column 2
            self.x = np.array([[xij[0], xij[0] ** 2] for xij in self.x])
        elif self.model == 'lin' or self.model == 'lin-lin':
            pass
        # insert ones to first column (intercept column)
        self.x1 = np.concatenate((np.ones((len(self.x), 1)), self.x), axis=1)
        # transpose of x
        self.xt = np.transpose(self.x1)
        # X'X
        self.xtx = self.xt @ self.x1
        # X'X inversed
        self.xtx_inv = np.linalg.inv(self.xtx)
        # X'Y
        self.xty = self.xt @ self.y
        # (X'X)^-1(X'Y)
        self.coefficient_vector = self.xtx_inv @ self.xty
        # independent value in the model
        self.intercept = self.coefficient_vector[0]
        # estimated coefficients of the model
        self.coefficients = self.coefficient_vector[1:]
        # set the length of the series
        self.n = len(self.y)

    def y_func(self, xi):
        # NOTE(review): zip([xi], self.coefficients) pairs the single value
        # xi with the FIRST coefficient only, so any additional regressors
        # (e.g. the squared column of the 'quadratic' model) are ignored
        # here -- confirm this is intended for multi-feature fits.
        return float(self.intercept + sum([xij * ci for xij, ci in zip([xi], self.coefficients)]))

    def predict(self, x):
        """ Calculates predicted Y values and error values """
        # predicted Y values. Y^
        # NOTE(review): only the first feature of each row (xi[0]) is used,
        # and x is not re-transformed for the log/quadratic models -- verify
        # callers pass data in the same form expected by fit().
        self.y_pred = np.array(
            [[self.y_func(xi[0])] for xi in x])
        # mean of Y's
        self.y_bar = float(sum(self.y) / self.n)
        # To calculate the determination coefficient r^2
        # error values (residuals, computed against the *training* y)
        self.errors = np.array(
            [[float(yi - yi_pred)] for yi, yi_pred in zip(self.y, self.y_pred)]
        )
        self.squared_errors = sum(np.array(
            [[e**2] for e in self.errors]
        ))
        self.total_variation = sum([(yi - self.y_bar)**2 for yi in self.y])
        # r^2 = 1 - SSE / SST
        self.score = float(1 - self.squared_errors / self.total_variation)
        return self.y_pred
| 36.068493 | 98 | 0.560957 |
7a08c6440ed591af6e4641ecdcbebe66240ceccb
| 2,390 |
py
|
Python
|
build/dart/verify_sources.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
build/dart/verify_sources.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
build/dart/verify_sources.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
# Walk three directory levels up from this file (build/dart/verify_sources.py)
# to the checkout root.
FUCHSIA_ROOT = os.path.dirname(  # $root
    os.path.dirname(             # build
        os.path.dirname(         # dart
            os.path.abspath(__file__))))

# Make the vendored PyYAML package importable.
# NOTE(review): yaml is imported but not used in the visible code -- confirm
# before removing.
sys.path += [os.path.join(FUCHSIA_ROOT, 'third_party', 'pyyaml', 'lib')]
import yaml
def main():
    """Verify that every .dart file under the package source dir is listed.

    Parses --package_root, --source_dir, --stamp and a trailing list of
    source paths (relative to <package_root>/<source_dir>).  Walks the
    source tree (skipping "testing" directories), and:
      * returns 1 and prints the offenders when any .dart file on disk is
        missing from the provided sources;
      * otherwise touches the stamp file and returns None (exit code 0).
    Third-party packages are exempt from the check.
    """
    parser = argparse.ArgumentParser(
        'Verifies that all .dart files are included in sources')
    parser.add_argument(
        '--package_root',
        help='Path to the directory hosting the library',
        required=True)
    parser.add_argument(
        '--source_dir',
        help='Path to the directory containing the package sources',
        required=True)
    parser.add_argument(
        '--stamp',
        help='File to touch when source checking succeeds',
        required=True)
    parser.add_argument(
        'sources', help='source files', nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # Third-party code is not required to enumerate its sources.
    if "third_party" in args.package_root:
        with open(args.stamp, 'w') as stamp:
            stamp.write('Success!')
        return 0

    source_files = set(args.sources)
    source_root = os.path.join(args.package_root, args.source_dir)
    missing_sources = []
    exclude_dirs = ["testing"]
    # Offset past "<package_root>/<source_dir>/" so walked paths become
    # relative to the source root (the +2 accounts for the two separators).
    slice_length = len(args.package_root) + len(args.source_dir) + 2
    for (dirpath, dirnames, filenames) in os.walk(source_root, topdown=True):
        # Prune excluded directories in place so os.walk skips them.
        dirnames[:] = [d for d in dirnames if d not in exclude_dirs]
        for filename in filenames:
            full_filename = os.path.join(dirpath[slice_length:], filename)
            _, file_extension = os.path.splitext(filename)
            if file_extension == '.dart' and full_filename not in source_files:
                missing_sources.append(full_filename)

    # We found one or more source files in the directory that was not included in sources.
    if missing_sources:
        print(
            'Source files found that were missing from the "sources" parameter:'
        )
        for source in missing_sources:
            print('"%s",' % source)
        return 1

    with open(args.stamp, 'w') as stamp:
        stamp.write('Success!')
if __name__ == '__main__':
    # main() returns 1 on missing sources, otherwise None/0 (success).
    sys.exit(main())
| 33.661972 | 90 | 0.642259 |
e131d650446593c4a718f85066f749a6472c08e6
| 87 |
py
|
Python
|
python/coursera_python/FUND_OF_COMP_RICE/FUND_OF_COMPUTING_RICE2/week1/list_fibo.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/FUND_OF_COMP_RICE/FUND_OF_COMPUTING_RICE2/week1/list_fibo.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/FUND_OF_COMP_RICE/FUND_OF_COMPUTING_RICE2/week1/list_fibo.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# Build the first 12 Fibonacci numbers and print the last one (89).
l = [0, 1]
while len(l) < 12:
    l.append(l[-1] + l[-2])
print(l.pop())
| 14.5 | 23 | 0.471264 |
e147d22f9dfa0b16a83fb133ab0d72a670aeecc2
| 235,179 |
py
|
Python
|
bindings/python/ensmallen/datasets/kgobo.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen/datasets/kgobo.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen/datasets/kgobo.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
"""Module providing graphs available from KGOBO.
References
----------
Please cite:
```bib
@misc{kgobo,
title = "KG-OBO",
year = "2021",
author = "{Reese, Justin and Caufield, Harry}",
howpublished = {\\url{https://github.com/Knowledge-Graph-Hub/kg-obo}},
note = {Online; accessed 14 September 2021}
}
```
"""
from ensmallen import Graph # pylint: disable=import-error
from .automatic_graph_retrieval import AutomaticallyRetrievedGraph
def MOD(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "10-03-2021-14-36", **kwargs
) -> Graph:
    """Retrieve the MOD graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 1.031.4, 10-03-2021-14-36 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "MOD", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FBBT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-24", **kwargs
) -> Graph:
    """Retrieve the FBBT graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2022-04-13, 2021-09-01, 2021-10-14, 2021-12-09,
    2022-01-27, 2022-02-24 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "FBBT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def BTO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-04-27", **kwargs
) -> Graph:
    """Retrieve the BTO graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2021-10-26, 2021-04-27 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "BTO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CHMO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-17", **kwargs
) -> Graph:
    """Retrieve the CHMO graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2022-04-19, no_version, 2022-02-17 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "CHMO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OBA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-19", **kwargs
) -> Graph:
    """Retrieve the OBA graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2022-05-11, 13-11-2015-10-21, 2021-12-03,
    2022-01-19 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "OBA", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PSO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-19", **kwargs
) -> Graph:
    """Retrieve the PSO graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2020-05-19 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "PSO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OGSF(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "11-22-2014", **kwargs
) -> Graph:
    """Retrieve the OGSF graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 11-22-2014 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "OGSF", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-05-15", **kwargs
) -> Graph:
    """Retrieve the MCO graph from KG-OBO.

    All parameters are forwarded to AutomaticallyRetrievedGraph:
    `preprocess="auto"` optimizes load time and memory peak on
    Linux/macOS (not Windows); enable `auto_enable_tradeoffs` when the
    graph has fewer than 50M edges; `cache_path` falls back to the
    `GRAPH_CACHE_DIR` environment variable or `graphs`.
    Available versions: 2019-05-15 (default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "MCO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OPMI(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "Vision-Release--1.0.130", **kwargs
) -> Graph:
    """Retrieve the OPMI graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "Vision-Release--1.0.130"
        Version to retrieve. Available versions: Vision-Release--1.0.130.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OPMI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FBDV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-25", **kwargs
) -> Graph:
    """Retrieve the FBDV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-02-25"
        Version to retrieve. Available versions: 2022-04-12,
        2021-09-01, 2021-10-13, 2021-12-06, 2022-01-24, 2022-02-25.
    """
    retriever = AutomaticallyRetrievedGraph(
        "FBDV", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CEPH(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2016-01-12", **kwargs
) -> Graph:
    """Retrieve the CEPH graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2016-01-12"
        Version to retrieve. Available versions: 2016-01-12.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CEPH", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MPATH(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-19", **kwargs
) -> Graph:
    """Retrieve the MPATH graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-05-19"
        Version to retrieve. Available versions: 2020-05-19.
    """
    retriever = AutomaticallyRetrievedGraph(
        "MPATH", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SPD(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "1.0", **kwargs
) -> Graph:
    """Retrieve the SPD graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "1.0"
        Version to retrieve. Available versions: 1.0.
    """
    retriever = AutomaticallyRetrievedGraph(
        "SPD", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OMIT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "dev", **kwargs
) -> Graph:
    """Retrieve the OMIT graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "dev"
        Version to retrieve. Available versions: dev.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OMIT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def VT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "04-10-2021-10-15", **kwargs
) -> Graph:
    """Retrieve the VT graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "04-10-2021-10-15"
        Version to retrieve. Available versions: 04-10-2021-10-15.
    """
    retriever = AutomaticallyRetrievedGraph(
        "VT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EHDAA2(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2013-07-04", **kwargs
) -> Graph:
    """Retrieve the EHDAA2 graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2013-07-04"
        Version to retrieve. Available versions: 2013-07-04.
    """
    retriever = AutomaticallyRetrievedGraph(
        "EHDAA2", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FLOPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve the FLOPO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. Available versions: no_version.
    """
    retriever = AutomaticallyRetrievedGraph(
        "FLOPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def WBLS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-08", **kwargs
) -> Graph:
    """Retrieve the WBLS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-08"
        Version to retrieve. Available versions: 2022-03-22,
        2021-07-06, 2021-12-08.
    """
    retriever = AutomaticallyRetrievedGraph(
        "WBLS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def RXNO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-06", **kwargs
) -> Graph:
    """Retrieve the RXNO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-06"
        Version to retrieve. Available versions: 2021-12-16,
        2021-01-21, 2021-11-15, 2021-12-06.
    """
    retriever = AutomaticallyRetrievedGraph(
        "RXNO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OMP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-06", **kwargs
) -> Graph:
    """Retrieve the OMP graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-06"
        Version to retrieve. Available versions: 2022-06-03,
        2021-10-01, 2021-12-03, 2022-01-07, 2022-02-08, 2022-03-04,
        2022-04-11, 2022-05-06.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OMP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ERO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve the ERO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. Available versions: no_version.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ERO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GNO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-13", **kwargs
) -> Graph:
    """Retrieve the GNO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-08-13"
        Version to retrieve. Available versions: 2022-02-23, 2021-08-13.
    """
    retriever = AutomaticallyRetrievedGraph(
        "GNO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def XCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "4.46", **kwargs
) -> Graph:
    """Retrieve the XCO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "4.46"
        Version to retrieve. Available versions: 4.46.
    """
    retriever = AutomaticallyRetrievedGraph(
        "XCO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def AMPHX(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-12-18", **kwargs
) -> Graph:
    """Retrieve the AMPHX graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-12-18"
        Version to retrieve. Available versions: 2020-12-18.
    """
    retriever = AutomaticallyRetrievedGraph(
        "AMPHX", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EPIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-05-28", **kwargs
) -> Graph:
    """Retrieve the EPIO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-05-28"
        Version to retrieve. Available versions: 2021-05-28.
    """
    retriever = AutomaticallyRetrievedGraph(
        "EPIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CLYH(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-29", **kwargs
) -> Graph:
    """Retrieve the CLYH graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-05-29"
        Version to retrieve. Available versions: 2020-05-29.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CLYH", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OOSTT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-01-08", **kwargs
) -> Graph:
    """Retrieve the OOSTT graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-01-08"
        Version to retrieve. Available versions: 2021-01-08.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OOSTT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FYPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-11", **kwargs
) -> Graph:
    """Retrieve the FYPO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-11"
        Version to retrieve. Available versions: 2022-05-16,
        2021-10-05, 2021-11-08, 2021-11-18, 2021-12-07, 2022-01-18,
        2022-01-27, 2022-04-22, 2022-04-28, 2022-05-11.
    """
    retriever = AutomaticallyRetrievedGraph(
        "FYPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NCRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-12-10", **kwargs
) -> Graph:
    """Retrieve the NCRO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2015-12-10"
        Version to retrieve. Available versions: 2015-12-10.
    """
    retriever = AutomaticallyRetrievedGraph(
        "NCRO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def IAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-12-09", **kwargs
) -> Graph:
    """Retrieve the IAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-12-09"
        Version to retrieve. Available versions: 2020-12-09.
    """
    retriever = AutomaticallyRetrievedGraph(
        "IAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GEO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "production-version-2016-03-26", **kwargs
) -> Graph:
    """Retrieve the GEO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "production-version-2016-03-26"
        Version to retrieve. Available versions:
        production-version-2016-03-26.
    """
    retriever = AutomaticallyRetrievedGraph(
        "GEO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EXO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.1", **kwargs
) -> Graph:
    """Retrieve the EXO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2.1"
        Version to retrieve. Available versions: 2.1.
    """
    retriever = AutomaticallyRetrievedGraph(
        "EXO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SWO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "swo.owl", **kwargs
) -> Graph:
    """Retrieve the SWO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "swo.owl"
        Version to retrieve. Available versions: swo.owl.
    """
    retriever = AutomaticallyRetrievedGraph(
        "SWO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OBCS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2018-02-22", **kwargs
) -> Graph:
    """Retrieve the OBCS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2018-02-22"
        Version to retrieve. Available versions: 2018-02-22.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OBCS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ENVO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-05-14", **kwargs
) -> Graph:
    """Retrieve the ENVO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-05-14"
        Version to retrieve. Available versions: 2021-05-14.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ENVO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SYMP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-10", **kwargs
) -> Graph:
    """Retrieve the SYMP graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-10"
        Version to retrieve. Available versions: 2022-05-26,
        2020-08-04, 2022-04-05, 2022-05-10.
    """
    retriever = AutomaticallyRetrievedGraph(
        "SYMP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TAXRANK(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2016-04-15", **kwargs
) -> Graph:
    """Retrieve the TAXRANK graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2016-04-15"
        Version to retrieve. Available versions: 2016-04-15.
    """
    retriever = AutomaticallyRetrievedGraph(
        "TAXRANK", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def APO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-03-23", **kwargs
) -> Graph:
    """Retrieve the APO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-03-23"
        Version to retrieve. Available versions: 2022-04-19,
        2021-09-07, 2021-10-07, 2022-01-03, 2022-02-08, 2022-03-23.
    """
    retriever = AutomaticallyRetrievedGraph(
        "APO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CLO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-02-10", **kwargs
) -> Graph:
    """Retrieve the CLO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time and memory peak
        (performed on Linux/macOS, skipped on Windows).
    load_nodes = True
        Load node names rather than a numeric range.
    auto_enable_tradeoffs = True
        Recommended when the graph has fewer than 50M edges.
    cache_path = None
        Directory where graphs are stored; falls back to the
        `GRAPH_CACHE_DIR` system variable or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-02-10"
        Version to retrieve. Available versions: 2022-03-20, 2019-02-10.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CLO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CMO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2019-02-19", **kwargs
) -> Graph:
    """Retrieve the CMO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-02-19"
        Version to retrieve.
        The available versions are:
        - 2019-02-19
    """
    retriever = AutomaticallyRetrievedGraph(
        "CMO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OHMI(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2019-09-17", **kwargs
) -> Graph:
    """Retrieve the OHMI graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-09-17"
        Version to retrieve.
        The available versions are:
        - 2019-09-17
    """
    retriever = AutomaticallyRetrievedGraph(
        "OHMI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HSO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2020-11-28", **kwargs
) -> Graph:
    """Retrieve the HSO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-11-28"
        Version to retrieve.
        The available versions are:
        - 2021-12-13
        - 2020-11-28
    """
    retriever = AutomaticallyRetrievedGraph(
        "HSO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FBBI(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2020-11-06", **kwargs
) -> Graph:
    """Retrieve the FBBI graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-11-06"
        Version to retrieve.
        The available versions are:
        - 2020-11-06
    """
    retriever = AutomaticallyRetrievedGraph(
        "FBBI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OBI(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-08-18", **kwargs
) -> Graph:
    """Retrieve the OBI graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-08-18"
        Version to retrieve.
        The available versions are:
        - 2022-01-03
        - 2021-08-18
    """
    retriever = AutomaticallyRetrievedGraph(
        "OBI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CDAO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2019-06-26", **kwargs
) -> Graph:
    """Retrieve the CDAO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-06-26"
        Version to retrieve.
        The available versions are:
        - 2019-06-26
    """
    retriever = AutomaticallyRetrievedGraph(
        "CDAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MFMO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2013-11-16", **kwargs
) -> Graph:
    """Retrieve the MFMO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2013-11-16"
        Version to retrieve.
        The available versions are:
        - 2013-11-16
    """
    retriever = AutomaticallyRetrievedGraph(
        "MFMO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CRO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2019-12-11", **kwargs
) -> Graph:
    """Retrieve the CRO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-12-11"
        Version to retrieve.
        The available versions are:
        - 2019-12-11
    """
    retriever = AutomaticallyRetrievedGraph(
        "CRO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CHEMINF(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2.0", **kwargs
) -> Graph:
    """Retrieve the CHEMINF graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2.0"
        Version to retrieve.
        The available versions are:
        - 2.0
    """
    retriever = AutomaticallyRetrievedGraph(
        "CHEMINF", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MP(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-11-04", **kwargs
) -> Graph:
    """Retrieve the MP graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-11-04"
        Version to retrieve.
        The available versions are:
        - releases
        - 2021-09-21
        - 2021-10-15
        - 2021-10-26
        - 2021-11-04
    """
    retriever = AutomaticallyRetrievedGraph(
        "MP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DUO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-02-23", **kwargs
) -> Graph:
    """Retrieve the DUO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-02-23"
        Version to retrieve.
        The available versions are:
        - 2021-02-23
    """
    retriever = AutomaticallyRetrievedGraph(
        "DUO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def LABO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-06-08", **kwargs
) -> Graph:
    """Retrieve the LABO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-06-08"
        Version to retrieve.
        The available versions are:
        - 2021-06-08
    """
    retriever = AutomaticallyRetrievedGraph(
        "LABO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OLATDV(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2020-03-10", **kwargs
) -> Graph:
    """Retrieve the OLATDV graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-10"
        Version to retrieve.
        The available versions are:
        - 2020-03-10
    """
    retriever = AutomaticallyRetrievedGraph(
        "OLATDV", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MPIO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2019-01-30", **kwargs
) -> Graph:
    """Retrieve the MPIO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-01-30"
        Version to retrieve.
        The available versions are:
        - 2019-01-30
    """
    retriever = AutomaticallyRetrievedGraph(
        "MPIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CHEBI(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="209", **kwargs
) -> Graph:
    """Retrieve the CHEBI graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "209"
        Version to retrieve.
        The available versions are:
        - 210
        - 203
        - 204
        - 205
        - 206
        - 207
        - 208
        - 209
    """
    retriever = AutomaticallyRetrievedGraph(
        "CHEBI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def AEO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2014-12-05", **kwargs
) -> Graph:
    """Retrieve the AEO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2014-12-05"
        Version to retrieve.
        The available versions are:
        - 2014-12-05
    """
    retriever = AutomaticallyRetrievedGraph(
        "AEO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FOBI(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="fobi", **kwargs
) -> Graph:
    """Retrieve the FOBI graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "fobi"
        Version to retrieve.
        The available versions are:
        - fobi
    """
    retriever = AutomaticallyRetrievedGraph(
        "FOBI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GENO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2020-03-08", **kwargs
) -> Graph:
    """Retrieve the GENO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-08"
        Version to retrieve.
        The available versions are:
        - 2022-03-05
        - 2020-03-08
    """
    retriever = AutomaticallyRetrievedGraph(
        "GENO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SBO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="28-08-2021-03-13", **kwargs
) -> Graph:
    """Retrieve the SBO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "28-08-2021-03-13"
        Version to retrieve.
        The available versions are:
        - 28-08-2021-03-13
    """
    retriever = AutomaticallyRetrievedGraph(
        "SBO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2022-03-09", **kwargs
) -> Graph:
    """Retrieve the TO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-03-09"
        Version to retrieve.
        The available versions are:
        - 2022-04-13
        - 2021-04-06
        - 2022-03-09
    """
    retriever = AutomaticallyRetrievedGraph(
        "TO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def UO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="no_version", **kwargs
) -> Graph:
    """Retrieve the UO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
        - no_version
    """
    retriever = AutomaticallyRetrievedGraph(
        "UO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MOP(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2022-02-01", **kwargs
) -> Graph:
    """Retrieve the MOP graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-02-01"
        Version to retrieve.
        The available versions are:
        - 2022-05-11
        - 2014-09-03
        - 2022-02-01
    """
    retriever = AutomaticallyRetrievedGraph(
        "MOP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CHIRO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2015-11-23", **kwargs
) -> Graph:
    """Retrieve the CHIRO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2015-11-23"
        Version to retrieve.
        The available versions are:
        - 2015-11-23
    """
    retriever = AutomaticallyRetrievedGraph(
        "CHIRO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OGMS(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-08-19", **kwargs
) -> Graph:
    """Retrieve the OGMS graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-08-19"
        Version to retrieve.
        The available versions are:
        - 2021-08-19
    """
    retriever = AutomaticallyRetrievedGraph(
        "OGMS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NCBITAXON(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-12-14", **kwargs
) -> Graph:
    """Retrieve the NCBITAXON graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-14"
        Version to retrieve.
        The available versions are:
        - 2022-02-21
        - 2021-06-10
        - 2021-12-14
    """
    retriever = AutomaticallyRetrievedGraph(
        "NCBITAXON", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FOODON(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-11-25", **kwargs
) -> Graph:
    """Retrieve the FOODON graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-11-25"
        Version to retrieve.
        The available versions are:
        - 2022-02-01
        - 2021-09-15
        - 2021-11-25
    """
    retriever = AutomaticallyRetrievedGraph(
        "FOODON", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PW(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="7.52", **kwargs
) -> Graph:
    """Retrieve the PW graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "7.52"
        Version to retrieve.
        The available versions are:
        - 7.52
    """
    retriever = AutomaticallyRetrievedGraph(
        "PW", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FOVT(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-10-29", **kwargs
) -> Graph:
    """Retrieve the FOVT graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-10-29"
        Version to retrieve.
        The available versions are:
        - 2021-11-10
        - 2021-06-02
        - 2021-10-29
    """
    retriever = AutomaticallyRetrievedGraph(
        "FOVT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def XPO(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2021-03-05", **kwargs
) -> Graph:
    """Retrieve the XPO graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-03-05"
        Version to retrieve.
        The available versions are:
        - 2021-03-05
    """
    retriever = AutomaticallyRetrievedGraph(
        "XPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ZFS(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="2020-03-10", **kwargs
) -> Graph:
    """Retrieve the ZFS graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-10"
        Version to retrieve.
        The available versions are:
        - 2020-03-10
    """
    retriever = AutomaticallyRetrievedGraph(
        "ZFS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def RS(
    directed=False, preprocess="auto", load_nodes=True,
    load_node_types=True, load_edge_weights=True,
    auto_enable_tradeoffs=True, sort_tmp_dir=None, verbose=2,
    cache=True, cache_path=None, cache_sys_var="GRAPH_CACHE_DIR",
    version="6.107", **kwargs
) -> Graph:
    """Retrieve the RS graph from the "kgobo" repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "6.107"
        Version to retrieve.
        The available versions are:
        - 6.107
    """
    retriever = AutomaticallyRetrievedGraph(
        "RS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CTO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the CTO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. The available versions are:
            - no_version
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "CTO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OMO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-06-08", **kwargs
) -> Graph:
    """Retrieve and return the OMO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-06-08"
        Version to retrieve. The available versions are:
            - 2022-04-27
            - 2020-06-08
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "OMO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FIX(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-04-13", **kwargs
) -> Graph:
    """Retrieve and return the FIX graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-04-13"
        Version to retrieve. The available versions are:
            - 2020-04-13
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "FIX", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MAMO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the MAMO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. The available versions are:
            - no_version
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MAMO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def VTO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-11-13", **kwargs
) -> Graph:
    """Retrieve and return the VTO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-11-13"
        Version to retrieve. The available versions are:
            - 2020-11-13
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "VTO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def UBERON(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-17", **kwargs
) -> Graph:
    """Retrieve and return the UBERON graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-17"
        Version to retrieve. The available versions are:
            - 2022-05-27
            - 2021-10-01
            - 2021-11-28
            - 2022-02-21
            - 2022-04-05
            - 2022-04-18
            - 2022-05-17
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "UBERON", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MFOMD(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-04-26", **kwargs
) -> Graph:
    """Retrieve and return the MFOMD graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-04-26"
        Version to retrieve. The available versions are:
            - 2020-04-26
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MFOMD", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def BFO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-08-26", **kwargs
) -> Graph:
    """Retrieve and return the BFO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2019-08-26"
        Version to retrieve. The available versions are:
            - 2019-08-26
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "BFO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HTN(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the HTN graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. The available versions are:
            - no_version
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "HTN", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PORO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2016-09-13", **kwargs
) -> Graph:
    """Retrieve and return the PORO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2016-09-13"
        Version to retrieve. The available versions are:
            - 2016-10-06
            - 2016-09-13
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "PORO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def AISM(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-13", **kwargs
) -> Graph:
    """Retrieve and return the AISM graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-13"
        Version to retrieve. The available versions are:
            - 2022-03-17
            - 2021-09-08
            - 2021-11-19
            - 2021-12-13
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "AISM", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def WBBT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-14", **kwargs
) -> Graph:
    """Retrieve and return the WBBT graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-14"
        Version to retrieve. The available versions are:
            - 2022-03-22
            - 2021-09-27
            - 2021-12-14
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "WBBT", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the HAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve. The available versions are:
            - no_version
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "HAO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-07-12", **kwargs
) -> Graph:
    """Retrieve and return the SO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-07-12"
        Version to retrieve. The available versions are:
            - 2021-11-22
            - 2021-07-12
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "SO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def RO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-04-25", **kwargs
) -> Graph:
    """Retrieve and return the RO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-04-25"
        Version to retrieve. The available versions are:
            - 2022-05-23
            - 2021-08-31
            - 2021-10-27
            - 2021-12-06
            - 2022-01-20
            - 2022-02-07
            - 2022-04-25
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "RO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MONDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-02", **kwargs
) -> Graph:
    """Retrieve and return the MONDO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-02"
        Version to retrieve. The available versions are:
            - 2022-06-01
            - 2021-09-01
            - 2021-10-01
            - 2021-11-01
            - 2021-12-01
            - 2021-12-30
            - 2022-02-04
            - 2022-03-01
            - 2022-04-04
            - 2022-05-02
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MONDO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DDPHENO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-06-19", **kwargs
) -> Graph:
    """Retrieve and return the DDPHENO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-06-19"
        Version to retrieve. The available versions are:
            - 2022-01-19
            - 2020-06-19
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "DDPHENO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def IDOMAL(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-03-16", **kwargs
) -> Graph:
    """Retrieve and return the IDOMAL graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2015-03-16"
        Version to retrieve. The available versions are:
            - 2015-03-16
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "IDOMAL", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MAXO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-06", **kwargs
) -> Graph:
    """Retrieve and return the MAXO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-06"
        Version to retrieve. The available versions are:
            - 2022-06-01
            - 2021-08-19
            - 2022-03-23
            - 2022-04-08
            - 2022-04-11
            - 2022-05-06
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MAXO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FBCV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-24", **kwargs
) -> Graph:
    """Retrieve and return the FBCV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-02-24"
        Version to retrieve. The available versions are:
            - 2022-04-14
            - 2021-09-02
            - 2021-10-18
            - 2021-12-13
            - 2022-01-24
            - 2022-02-24
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "FBCV", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TRANS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-08-04", **kwargs
) -> Graph:
    """Retrieve and return the TRANS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-08-04"
        Version to retrieve. The available versions are:
            - 2020-08-04
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "TRANS", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PSDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-12-04", **kwargs
) -> Graph:
    """Retrieve and return the PSDO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-12-04"
        Version to retrieve. The available versions are:
            - 2020-12-04
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "PSDO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SCDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-04-15", **kwargs
) -> Graph:
    """Retrieve and return the SCDO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-04-15"
        Version to retrieve. The available versions are:
            - 2021-04-15
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "SCDO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def LEPAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-11-20", **kwargs
) -> Graph:
    """Retrieve and return the LEPAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-11-20"
        Version to retrieve. The available versions are:
            - 2021-11-20
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "LEPAO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ONTONEO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-04-29", **kwargs
) -> Graph:
    """Retrieve and return the ONTONEO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-04-29"
        Version to retrieve. The available versions are:
            - 2021-04-29
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "ONTONEO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DRON(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-28", **kwargs
) -> Graph:
    """Retrieve and return the DRON graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-01-28"
        Version to retrieve. The available versions are:
            - 2022-04-22
            - 2021-08-12
            - 2021-10-20
            - 2022-01-28
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "DRON", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def RBO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-04-16", **kwargs
) -> Graph:
    """Retrieve and return the RBO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-04-16"
        Version to retrieve. The available versions are:
            - 2022-06-02
            - 2021-08-30
            - 2022-01-14
            - 2022-04-16
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "RBO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NCIT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-17", **kwargs
) -> Graph:
    """Retrieve and return the NCIT graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-17"
        Version to retrieve. The available versions are:
            - 2022-04-14
            - 2021-08-20
            - 2021-12-17
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "NCIT", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FMA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-04-13", **kwargs
) -> Graph:
    """Retrieve and return the FMA graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-04-13"
        Version to retrieve. The available versions are:
            - 2020-04-13
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "FMA", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def REX(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2017-11-19", **kwargs
) -> Graph:
    """Retrieve and return the REX graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time and memory peak;
        applied on Linux/macOS, skipped on Windows.
    load_nodes = True
        Load node names, otherwise use a numeric range.
    auto_enable_tradeoffs = True
        Enable when the graph has < 50M edges.
    cache_path = None
        Path to store graphs; defaults to the `GRAPH_CACHE_DIR`
        sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2017-11-19"
        Version to retrieve. The available versions are:
            - 2017-11-19
    """
    # Build the retriever first, then invoke it to obtain the graph.
    retriever = AutomaticallyRetrievedGraph(
        "REX", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def COB(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-13", **kwargs
) -> Graph:
    """Return the COB graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-09-13"
        Version to retrieve.
        The available versions are:
        - 2022-05-02
        - 2021-09-13
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "COB", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SIBO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-06-15", **kwargs
) -> Graph:
    """Return the SIBO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2015-06-15"
        Version to retrieve.
        The available versions are:
        - 2015-06-15
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "SIBO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PDRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-06-08", **kwargs
) -> Graph:
    """Return the PDRO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-06-08"
        Version to retrieve.
        The available versions are:
        - 2021-06-08
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "PDRO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OGG(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "12-01-2016", **kwargs
) -> Graph:
    """Return the OGG graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "12-01-2016"
        Version to retrieve.
        The available versions are:
        - 12-01-2016
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "OGG", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def XLMOD(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Return the XLMOD graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
        - 2019-10-28
        - no_version
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "XLMOD", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HANCESTRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-12-18", **kwargs
) -> Graph:
    """Return the HANCESTRO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-12-18"
        Version to retrieve.
        The available versions are:
        - 2022-05-12
        - 2020-12-18
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "HANCESTRO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-03-22", **kwargs
) -> Graph:
    """Return the GO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-03-22"
        Version to retrieve.
        The available versions are:
        - 2022-05-16
        - 2021-09-01
        - 2021-10-26
        - 2021-11-16
        - 2021-12-15
        - 2022-01-13
        - 2022-03-10
        - 2022-03-22
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "GO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MF(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-21", **kwargs
) -> Graph:
    """Return the MF graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-09-21"
        Version to retrieve.
        The available versions are:
        - 2021-11-17
        - 2021-09-21
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MF", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GSSO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.0.5", **kwargs
) -> Graph:
    """Return the GSSO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2.0.5"
        Version to retrieve.
        The available versions are:
        - 2.0.5
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "GSSO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def UPHENO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Return the UPHENO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
        - no_version
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "UPHENO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PLANA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-10-21", **kwargs
) -> Graph:
    """Return the PLANA graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-10-21"
        Version to retrieve.
        The available versions are:
        - releases
        - 2021-09-29
        - 2021-10-06
        - 2021-10-21
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "PLANA", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OAE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "1.2.44", **kwargs
) -> Graph:
    """Return the OAE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "1.2.44"
        Version to retrieve.
        The available versions are:
        - 1.2.44
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "OAE", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MMUSDV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-10", **kwargs
) -> Graph:
    """Return the MMUSDV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-10"
        Version to retrieve.
        The available versions are:
        - 2020-03-10
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MMUSDV", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "4.1.88", **kwargs
) -> Graph:
    """Return the MS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "4.1.88"
        Version to retrieve.
        The available versions are:
        - 4.1.89
        - 4.1.35
        - 4.1.62
        - 4.1.64
        - 4.1.65
        - 4.1.67
        - 4.1.69
        - 4.1.70
        - 4.1.71
        - 4.1.78
        - 4.1.82
        - 4.1.83
        - 4.1.84
        - 4.1.86
        - 4.1.88
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MS", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def APOLLO_SV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "v4.1.1.", **kwargs
) -> Graph:
    """Return the APOLLO_SV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "v4.1.1."
        Version to retrieve.
        The available versions are:
        - v4.1.1.
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "APOLLO_SV", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HSAPDV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-10", **kwargs
) -> Graph:
    """Return the HSAPDV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-10"
        Version to retrieve.
        The available versions are:
        - 2020-03-10
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "HSAPDV", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def VO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "September_18__2021", **kwargs
) -> Graph:
    """Return the VO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "September_18__2021"
        Version to retrieve.
        The available versions are:
        - March-19--2022
        - September_18__2021
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "VO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MIRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2014-05-14", **kwargs
) -> Graph:
    """Return the MIRO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2014-05-14"
        Version to retrieve.
        The available versions are:
        - 2014-05-14
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "MIRO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EMAPA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-01", **kwargs
) -> Graph:
    """Return the EMAPA graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-09-01"
        Version to retrieve.
        The available versions are:
        - 2021-09-01
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "EMAPA", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GECKO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-01-18", **kwargs
) -> Graph:
    """Return the GECKO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-01-18"
        Version to retrieve.
        The available versions are:
        - 2021-01-18
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "GECKO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CARO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-18", **kwargs
) -> Graph:
    """Return the CARO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-02-18"
        Version to retrieve.
        The available versions are:
        - 2022-02-18
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "CARO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GENEPIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-26", **kwargs
) -> Graph:
    """Return the GENEPIO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-01-26"
        Version to retrieve.
        The available versions are:
        - 2022-02-06
        - 2021-05-24
        - 2022-01-26
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "GENEPIO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TADS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-08-20", **kwargs
) -> Graph:
    """Return the TADS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2015-08-20"
        Version to retrieve.
        The available versions are:
        - 2015-08-20
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "TADS", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-07", **kwargs
) -> Graph:
    """Return the FAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-05-07"
        Version to retrieve.
        The available versions are:
        - 2020-05-07
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "FAO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CVDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-05", **kwargs
) -> Graph:
    """Return the CVDO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-05"
        Version to retrieve.
        The available versions are:
        - 2020-03-05
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "CVDO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ECAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-22", **kwargs
) -> Graph:
    """Return the ECAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-05-22"
        Version to retrieve.
        The available versions are:
        - 2020-05-22
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "ECAO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OHPI(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "releases", **kwargs
) -> Graph:
    """Return the OHPI graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "releases"
        Version to retrieve.
        The available versions are:
        - releases
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "OHPI", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OPL(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-01-28", **kwargs
) -> Graph:
    """Return the OPL graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-01-28"
        Version to retrieve.
        The available versions are:
        - 2021-01-28
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "OPL", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TGMA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2013-06-03", **kwargs
) -> Graph:
    """Return the TGMA graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2013-06-03"
        Version to retrieve.
        The available versions are:
        - 2013-06-03
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "TGMA", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def BCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-27", **kwargs
) -> Graph:
    """Return the BCO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-27"
        Version to retrieve.
        The available versions are:
        - 2021-11-14
        - 2020-03-27
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "BCO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ICO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-04-21", **kwargs
) -> Graph:
    """Return the ICO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-04-21"
        Version to retrieve.
        The available versions are:
        - 2021-04-21
    """
    # Build the retriever object, then invoke it to load the graph.
    retriever = AutomaticallyRetrievedGraph(
        "ICO", version, "kgobo", directed, preprocess,
        load_nodes, load_node_types, load_edge_weights,
        auto_enable_tradeoffs, sort_tmp_dir, verbose,
        cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ZECO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-06-04", **kwargs
) -> Graph:
    """Retrieve and return the ZECO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-06-04"
        Version to retrieve.
        The available versions are:
            - 2022-02-14
            - 2021-06-04
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ZECO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PHIPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-07-14", **kwargs
) -> Graph:
    """Retrieve and return the PHIPO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-07-14"
        Version to retrieve.
        The available versions are:
            - 2021-07-14
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PHIPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PDUMDV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-10", **kwargs
) -> Graph:
    """Retrieve and return the PDUMDV graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-10"
        Version to retrieve.
        The available versions are:
            - 2020-03-10
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PDUMDV", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ARO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "05-10-2021-09-37", **kwargs
) -> Graph:
    """Retrieve and return the ARO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "05-10-2021-09-37"
        Version to retrieve.
        The available versions are:
            - 05-10-2021-09-37
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ARO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OARCS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the OARCS graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
            - no_version
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "OARCS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PCL(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-03-02", **kwargs
) -> Graph:
    """Retrieve and return the PCL graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-03-02"
        Version to retrieve.
        The available versions are:
            - 2022-04-27
            - 2022-01-24
            - 2022-02-02
            - 2022-02-09
            - 2022-03-02
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PCL", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CTENO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2016-10-19", **kwargs
) -> Graph:
    """Retrieve and return the CTENO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2016-10-19"
        Version to retrieve.
        The available versions are:
            - 2016-10-19
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "CTENO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PLANP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-28", **kwargs
) -> Graph:
    """Retrieve and return the PLANP graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-03-28"
        Version to retrieve.
        The available versions are:
            - 2020-03-28
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PLANP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DOID(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-04-01", **kwargs
) -> Graph:
    """Retrieve and return the DOID graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-04-01"
        Version to retrieve.
        The available versions are:
            - 2022-04-28
            - 2021-10-01
            - 2021-10-12
            - 2021-11-17
            - 2021-12-15
            - 2022-01-31
            - 2022-02-21
            - 2022-03-02
            - 2022-04-01
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "DOID", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OMRSE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-30", **kwargs
) -> Graph:
    """Retrieve and return the OMRSE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-08-30"
        Version to retrieve.
        The available versions are:
            - 2022-04-06
            - 2021-08-30
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "OMRSE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2018-10-26", **kwargs
) -> Graph:
    """Retrieve and return the PPO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2018-10-26"
        Version to retrieve.
        The available versions are:
            - 2018-10-26
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OVAE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "04-11-2016", **kwargs
) -> Graph:
    """Retrieve and return the OVAE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "04-11-2016"
        Version to retrieve.
        The available versions are:
            - 04-11-2016
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "OVAE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ZP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-12", **kwargs
) -> Graph:
    """Retrieve and return the ZP graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-12"
        Version to retrieve.
        The available versions are:
            - 2021-12-12
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ZP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def STATO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "RC1.4", **kwargs
) -> Graph:
    """Retrieve and return the STATO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "RC1.4"
        Version to retrieve.
        The available versions are:
            - RC1.4
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "STATO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ONE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the ONE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
            - no_version
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ONE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ECTO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-04", **kwargs
) -> Graph:
    """Retrieve and return the ECTO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-05-04"
        Version to retrieve.
        The available versions are:
            - 2022-05-12
            - 2021-08-25
            - 2022-03-09
            - 2022-05-04
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ECTO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def XAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-03-04", **kwargs
) -> Graph:
    """Retrieve and return the XAO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-03-04"
        Version to retrieve.
        The available versions are:
            - 2021-03-04
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "XAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MIAPA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the MIAPA graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
            - no_version
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "MIAPA", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MI(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-04-13", **kwargs
) -> Graph:
    """Retrieve and return the MI graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-04-13"
        Version to retrieve.
        The available versions are:
            - 2020-04-13
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "MI", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ECOCORE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-02-17", **kwargs
) -> Graph:
    """Retrieve and return the ECOCORE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-02-17"
        Version to retrieve.
        The available versions are:
            - 2022-03-09
            - 2021-02-17
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "ECOCORE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MMO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.39", **kwargs
) -> Graph:
    """Retrieve and return the MMO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2.39"
        Version to retrieve.
        The available versions are:
            - 2.39
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "MMO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EUPATH(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-09", **kwargs
) -> Graph:
    """Retrieve and return the EUPATH graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-09-09"
        Version to retrieve.
        The available versions are:
            - 2022-02-15
            - 2021-09-09
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "EUPATH", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OBIB(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-02-02", **kwargs
) -> Graph:
    """Retrieve and return the OBIB graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-02-02"
        Version to retrieve.
        The available versions are:
            - 2021-11-12
            - 2021-02-02
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "OBIB", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def IDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2017-11-03", **kwargs
) -> Graph:
    """Retrieve and return the IDO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2017-11-03"
        Version to retrieve.
        The available versions are:
            - 2017-11-03
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "IDO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def SEPIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the SEPIO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
            - no_version
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "SEPIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TTO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "19-07-2012-13-26", **kwargs
) -> Graph:
    """Retrieve and return the TTO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "19-07-2012-13-26"
        Version to retrieve.
        The available versions are:
            - 19-07-2012-13-26
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "TTO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PR(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "65.0", **kwargs
) -> Graph:
    """Retrieve and return the PR graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "65.0"
        Version to retrieve.
        The available versions are:
            - 66.0
            - 63.0
            - 64.0
            - 65.0
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PR", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NBO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-02-15", **kwargs
) -> Graph:
    """Retrieve and return the NBO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-02-15"
        Version to retrieve.
        The available versions are:
            - 2021-02-15
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "NBO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def WBPHENOTYPE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-20", **kwargs
) -> Graph:
    """Retrieve and return the WBPHENOTYPE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-12-20"
        Version to retrieve.
        The available versions are:
            - 2022-03-22
            - 2021-09-27
            - 2021-10-25
            - 2021-12-12
            - 2021-12-20
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "WBPHENOTYPE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PECO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-08-21", **kwargs
) -> Graph:
    """Retrieve and return the PECO graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2020-08-21"
        Version to retrieve.
        The available versions are:
            - 2020-08-21
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "PECO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GAZ(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve and return the GAZ graph from the kgobo repository.

    Parameters
    ----------
    directed = False
        Load the graph as directed or undirected.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "no_version"
        Version to retrieve.
        The available versions are:
            - no_version
    """
    # Build the retriever, then invoke it to perform the actual download/load.
    retriever = AutomaticallyRetrievedGraph(
        "GAZ", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-03-10", **kwargs
) -> Graph:
    """Retrieve the CIO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2015-03-10.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def INO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "1.0.112", **kwargs
) -> Graph:
    """Retrieve the INO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 1.0.112.
    """
    retriever = AutomaticallyRetrievedGraph(
        "INO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CLAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-27", **kwargs
) -> Graph:
    """Retrieve the CLAO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-09-27.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CLAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def UPA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2018-12-12", **kwargs
) -> Graph:
    """Retrieve the UPA graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2018-12-12.
    """
    retriever = AutomaticallyRetrievedGraph(
        "UPA", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NOMEN(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve the NOMEN graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: no_version.
    """
    retriever = AutomaticallyRetrievedGraph(
        "NOMEN", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ZFA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-28", **kwargs
) -> Graph:
    """Retrieve the ZFA graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-03-15, 2020-04-14, 2021-12-09,
    2022-02-15, 2022-02-28.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ZFA", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DISDRIV(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve the DISDRIV graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: no_version.
    """
    retriever = AutomaticallyRetrievedGraph(
        "DISDRIV", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CIDO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "09-03-2021", **kwargs
) -> Graph:
    """Retrieve the CIDO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 09-03-2021.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CIDO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def COLAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-12-14", **kwargs
) -> Graph:
    """Retrieve the COLAO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-12-14.
    """
    retriever = AutomaticallyRetrievedGraph(
        "COLAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def KISAO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.30", **kwargs
) -> Graph:
    """Retrieve the KISAO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2.30.
    """
    retriever = AutomaticallyRetrievedGraph(
        "KISAO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MA(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2017-02-07", **kwargs
) -> Graph:
    """Retrieve the MA graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2017-02-07.
    """
    retriever = AutomaticallyRetrievedGraph(
        "MA", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-13", **kwargs
) -> Graph:
    """Retrieve the PO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-08-13.
    """
    retriever = AutomaticallyRetrievedGraph(
        "PO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CDNO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-10", **kwargs
) -> Graph:
    """Retrieve the CDNO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-04-06, 2021-10-20, 2022-01-10.
    """
    retriever = AutomaticallyRetrievedGraph(
        "CDNO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ONS(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "1.2.2", **kwargs
) -> Graph:
    """Retrieve the ONS graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: no_version, 1.2.2.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ONS", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OHD(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2016-06-27", **kwargs
) -> Graph:
    """Retrieve the OHD graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2016-06-27.
    """
    retriever = AutomaticallyRetrievedGraph(
        "OHD", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def VARIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Retrieve the VARIO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: no_version.
    """
    retriever = AutomaticallyRetrievedGraph(
        "VARIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def AGRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-07-01", **kwargs
) -> Graph:
    """Retrieve the AGRO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-11-05, 2021-07-01.
    """
    retriever = AutomaticallyRetrievedGraph(
        "AGRO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DIDEO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-06-11", **kwargs
) -> Graph:
    """Retrieve the DIDEO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-06-11.
    """
    retriever = AutomaticallyRetrievedGraph(
        "DIDEO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def TXPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-03-03", **kwargs
) -> Graph:
    """Retrieve the TXPO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2020-03-03.
    """
    retriever = AutomaticallyRetrievedGraph(
        "TXPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PATO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-20", **kwargs
) -> Graph:
    """Retrieve the PATO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-05-20, 2021-09-09, 2021-11-05,
    2021-12-03, 2022-01-12, 2022-02-08, 2022-02-20.
    """
    retriever = AutomaticallyRetrievedGraph(
        "PATO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HOM(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-01-07", **kwargs
) -> Graph:
    """Retrieve the HOM graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2015-01-07.
    """
    retriever = AutomaticallyRetrievedGraph(
        "HOM", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ECO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-04-11", **kwargs
) -> Graph:
    """Retrieve the ECO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-05-27, 2021-10-20, 2021-12-03,
    2022-01-04, 2022-02-09, 2022-04-11.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ECO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ICEO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.1", **kwargs
) -> Graph:
    """Retrieve the ICEO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2.1.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ICEO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DDANAT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-04-13", **kwargs
) -> Graph:
    """Retrieve the DDANAT graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2020-04-13.
    """
    retriever = AutomaticallyRetrievedGraph(
        "DDANAT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def BSPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-22", **kwargs
) -> Graph:
    """Retrieve the BSPO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-10-13, 2021-09-22.
    """
    retriever = AutomaticallyRetrievedGraph(
        "BSPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MRO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-03-14", **kwargs
) -> Graph:
    """Retrieve the MRO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-05-13, 2021-09-24, 2021-10-15,
    2021-11-04, 2021-11-29, 2021-12-15, 2022-01-13, 2022-01-21,
    2022-03-14.
    """
    retriever = AutomaticallyRetrievedGraph(
        "MRO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def PCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-05-03", **kwargs
) -> Graph:
    """Retrieve the PCO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-05-03.
    """
    retriever = AutomaticallyRetrievedGraph(
        "PCO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EPSO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "", **kwargs
) -> Graph:
    """Retrieve the EPSO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2021-05-28 and "" (the empty string, which is
    the default).
    """
    retriever = AutomaticallyRetrievedGraph(
        "EPSO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ORNASEQ(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-07-08", **kwargs
) -> Graph:
    """Retrieve the ORNASEQ graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2019-07-08.
    """
    retriever = AutomaticallyRetrievedGraph(
        "ORNASEQ", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def HP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-14", **kwargs
) -> Graph:
    """Retrieve the HP graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-04-14, 2021-08-02, 2021-10-10,
    2022-02-14.
    """
    retriever = AutomaticallyRetrievedGraph(
        "HP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def DPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-24", **kwargs
) -> Graph:
    """Retrieve the DPO graph from the "kgobo" repository.

    Notable parameters: ``preprocess`` ("auto" preprocesses for optimal
    load time and memory peak on Linux/macOS, but not Windows);
    ``load_nodes`` loads node names instead of a numeric range;
    ``auto_enable_tradeoffs`` should be enabled for graphs with fewer
    than 50M edges; ``cache_path`` defaults to the ``GRAPH_CACHE_DIR``
    sys var or ``graphs``; ``version`` selects the release to retrieve.

    Available versions: 2022-04-13, 2021-09-02, 2021-10-15,
    2021-12-10, 2022-01-24, 2022-02-24.
    """
    retriever = AutomaticallyRetrievedGraph(
        "DPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CL(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-05", **kwargs
) -> Graph:
    """Retrieve and return the CL graph from the kgobo repository.
    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-01-05"
        Version to retrieve
        The available versions are:
            - 2022-02-16
            - 2021-09-09
            - 2021-11-25
            - 2021-12-07
            - 2021-12-16
            - 2022-01-05
    """
    # Build the retriever first, then invoke it to perform the actual download.
    retriever = AutomaticallyRetrievedGraph(
        "CL", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def MFOEM(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-09-21", **kwargs
) -> Graph:
    """Retrieve and return the MFOEM graph from the kgobo repository.
    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-09-21"
        Version to retrieve
        The available versions are:
            - 2021-11-17
            - 2021-09-21
    """
    # Build the retriever first, then invoke it to perform the actual download.
    retriever = AutomaticallyRetrievedGraph(
        "MFOEM", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
| 35.222256 | 96 | 0.661585 |
e1719d8ebd372ec11c0a39ad33fb890f8a55ef58
| 459 |
py
|
Python
|
src/xrt/targets/steamvr_drv/copy_plugin.py
|
leviathanch/monado
|
36a540a764fd5529018dfceb28e10804db9596bf
|
[
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 2 |
2021-11-08T05:17:12.000Z
|
2022-01-24T12:50:59.000Z
|
src/xrt/targets/steamvr_drv/copy_plugin.py
|
SimulaVR/monado
|
b5d46eebf5f9b7f96a52639484a1b35d8ab3cd21
|
[
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null |
src/xrt/targets/steamvr_drv/copy_plugin.py
|
SimulaVR/monado
|
b5d46eebf5f9b7f96a52639484a1b35d8ab3cd21
|
[
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0
import os
import shutil
import sys


def copy_plugin(input_path, output_path):
    """Copy the plugin file at *input_path* to *output_path*.

    Creates the destination directory first if one is named and it does
    not exist yet.
    """
    dest_dir = os.path.dirname(output_path)
    # os.makedirs("") raises FileNotFoundError, so only create a directory
    # when the destination actually names one.
    if dest_dir:
        os.makedirs(dest_dir, exist_ok=True)
    shutil.copyfile(input_path, output_path)
    print("Copying plugin " + str(input_path) + " to " + str(output_path))


if __name__ == "__main__":
    # argv[1] = source plugin path, argv[2] = destination path.
    copy_plugin(sys.argv[1], sys.argv[2])
| 21.857143 | 70 | 0.72549 |
e1916ae94e8c8503838cc7fb1f8cdfc99c775f4d
| 233 |
py
|
Python
|
2-resources/_PYTHON/code-examples-master/aws/python/rds/list_rds_instances.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/aws/python/rds/list_rds_instances.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/aws/python/rds/list_rds_instances.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
import boto3

# Open a session for the 'test' profile in eu-west-1 and print the
# identifier of every RDS database instance in that account/region.
session = boto3.Session(region_name='eu-west-1', profile_name='test')
rds = session.client('rds')
response = rds.describe_db_instances()
for db_instance in response['DBInstances']:
    print(db_instance['DBInstanceIdentifier'])
| 23.3 | 69 | 0.763948 |
363450b17a5ec7ebe740b2e6f3abee0aae1056f1
| 697 |
py
|
Python
|
python_reference/useful_scripts/read_csv_to_dict.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
python_reference/useful_scripts/read_csv_to_dict.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
python_reference/useful_scripts/read_csv_to_dict.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-10-14T07:30:18.000Z
|
2019-10-14T07:30:18.000Z
|
# Sebastian Raschka, 03/2014
def read_csv(csv_path):
    """Read a csv file (with a header) into a dictionary of columns.

    The dictionary structure is
    {header_col1: [val1_line1, val1_line2, ...], header_col2: [...], ...}.
    Values that parse as floats are converted to float; everything else is
    kept as str.
    """
    import csv  # local import keeps the module's import surface unchanged

    with open(csv_path, 'r', newline='') as in_csv:
        reader = csv.reader(in_csv)
        # next(reader, []) yields an empty header (and empty dict) for an
        # empty file instead of raising StopIteration.
        header = next(reader, [])
        data = {col: [] for col in header}
        for row in reader:
            # zip pairs each value with its column and tolerates rows that
            # are shorter or longer than the header.
            for col, value in zip(header, row):
                try:
                    data[col].append(float(value))
                except ValueError:
                    data[col].append(value)
    return data
| 31.681818 | 97 | 0.53802 |
36939b23d5e0aa0155df7912ce8908decc6f5958
| 318 |
py
|
Python
|
FizzBuzz/Python/fizzbuzz.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
FizzBuzz/Python/fizzbuzz.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
FizzBuzz/Python/fizzbuzz.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
"""
FizzBuzz
Written by: Ian Doarn
"""
def fizzbuzz(n):
    """Return the FizzBuzz word for *n*, or str(n) when none applies."""
    word = ''
    if n % 3 == 0:
        word += 'Fizz'
    if n % 5 == 0:
        word += 'Buzz'
    # Multiples of both 3 and 5 accumulate 'FizzBuzz'; anything else falls
    # back to the number itself.
    return word or str(n)
if __name__ == '__main__':
    # Print the FizzBuzz sequence for 1..100.
    for number in range(1, 101):
        print(fizzbuzz(number))
| 16.736842 | 33 | 0.493711 |
36e34b05ce9277ef98a6d95195bb6e2724b47f33
| 1,997 |
py
|
Python
|
test/test_npu/test_onnx/torch.onnx/custom_ops_demo/test_custom_ops_addcmul.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_onnx/torch.onnx/custom_ops_demo/test_custom_ops_addcmul.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_onnx/torch.onnx/custom_ops_demo/test_custom_ops_addcmul.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.utils.cpp_extension
import torch.nn as nn
import numpy as np
def do_export(model, inputs, *args, **kwargs):
    """Trace *model* on *inputs* and export it to ./onnx/custom_op_addcmul.onnx.

    Extra positional/keyword arguments are forwarded to torch.onnx._export
    (e.g. opset_version).
    """
    import os
    # The export writes into ./onnx; create it so the write cannot fail on a
    # missing directory.
    os.makedirs("./onnx", exist_ok=True)
    # The original bound the result to an unused local; _export's return value
    # is not needed here.
    torch.onnx._export(model, inputs, "./onnx/custom_op_addcmul.onnx",
                       verbose=True, *args, **kwargs)
###########################################################
class CustomAddModel(torch.nn.Module):
    """Minimal module wrapping torch.addcmul, used to exercise ONNX export."""

    def forward(self, tensor, tensor1, tensor2, out=None):
        # Computes tensor + value * tensor1 * tensor2 with value fixed at 1.0.
        result = torch.addcmul(tensor, tensor1, tensor2, value=1.0)
        return result
############################################################
def addcmul(g, self, tensor1, tensor2, value=1.0, out=None):
    # ONNX symbolic function: lower aten::addcmul to a single custom
    # 'torch::addcmul' node in graph *g*, forwarding self, tensor1, tensor2
    # and value as op inputs.
    # NOTE(review): `value` is passed as an input rather than an attribute,
    # and `out` is ignored — presumably matching the custom op's signature;
    # confirm against the op's backend definition.
    return g.op('torch::addcmul', self, tensor1, tensor2, value)
import torch.onnx.symbolic_registry as sym_registry
# Register the symbolic function above for 'addcmul' in the default domain ''
# at opset 11, then print the registration status between separator banners
# as a quick sanity check.
sym_registry.register_op('addcmul', addcmul, '', 11)
print('=======================================================================================')
print(sym_registry.is_registered_op('addcmul', '',11))
print('=======================================================================================')
############################################################
# Build the model plus broadcastable random inputs and export at opset 11.
model = CustomAddModel()
t = torch.randn(1, 3)
t1 = torch.randn(3, 1)
t2 = torch.randn(1, 3)
do_export(model, (t, t1, t2), opset_version=11)
| 34.431034 | 107 | 0.585378 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.