seq_id stringlengths 4–11 | text stringlengths 113–2.92M | repo_name stringlengths 4–125 ⌀ | sub_path stringlengths 3–214 | file_name stringlengths 3–160 | file_ext stringclasses 18 values | file_size_in_byte int64 113–2.92M | program_lang stringclasses 1 value | lang stringclasses 93 values | doc_type stringclasses 1 value | stars int64 0–179k ⌀ | dataset stringclasses 3 values | pt stringclasses 78 values |
---|---|---|---|---|---|---|---|---|---|---|---|---
73931882429
|
#!python
"""
A natural number, N, that can be written as the sum and product of a given set of at least two natural numbers, {a1, a2, ... , ak} is called a product-sum number: N = a1 + a2 + ... + ak = a1 × a2 × ... × ak.
For example, 6 = 1 + 2 + 3 = 1 × 2 × 3.
For a given set of size, k, we shall call the smallest N with this property a minimal product-sum number. The minimal product-sum numbers for sets of size, k = 2, 3, 4, 5, and 6 are as follows.
k=2: 4 = 2 × 2 = 2 + 2
k=3: 6 = 1 × 2 × 3 = 1 + 2 + 3
k=4: 8 = 1 × 1 × 2 × 4 = 1 + 1 + 2 + 4
k=5: 8 = 1 × 1 × 2 × 2 × 2 = 1 + 1 + 2 + 2 + 2
k=6: 12 = 1 × 1 × 1 × 1 × 2 × 6 = 1 + 1 + 1 + 1 + 2 + 6
Hence for 2≤k≤6, the sum of all the minimal product-sum numbers is 4+6+8+12 = 30; note that 8 is only counted once in the sum.
In fact, as the complete set of minimal product-sum numbers for 2≤k≤12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
What is the sum of all the minimal product-sum numbers for 2≤k≤12000?
"""
#the smallest possible sum of a size-k set is 1+1+...+1, i.e. k, however this will not be a product sum for k>1
#also notice that 2 x k x 1 x 1 x ... x 1 (k-2 ones) == 2k = 2 + k + 1 + ... + 1 (k-2 ones)
#so our minimal product-sum number mps satisfies k <= mps <= 2k, so we just need to look for product sums in this range
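# Hedged illustrative check (added, not part of the original solution): for any k >= 2 the
# multiset {k, 2, 1, ..., 1} with k-2 ones has sum k + 2 + (k - 2) = 2k and product 2k,
# which is exactly where the 2k upper bound used above comes from.
def _demo_2k_bound(k=6):
    members = [k, 2] + [1] * (k - 2)
    prod = 1
    for m in members:
        prod *= m
    return sum(members) == 2 * k and prod == 2 * k
assert _demo_2k_bound()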
from functools import reduce, lru_cache
from operator import mul
from pe import prime_factors  # local helper module: prime_factors(n) returns the prime factors of n with multiplicity
def product(l):
return reduce(mul, l)
def is_ps(l):
return sum(l) == product(l)
@lru_cache(maxsize=None)
def c_prime_factors(n):
return prime_factors(n)
@lru_cache(maxsize=None)
def c_all_multiplicands(n):
return list(all_multiplicands(n))
@lru_cache(maxsize=None)
def all_multiplicands(n):
factors = c_prime_factors(n)
res = []
if len(factors)==1:
return [[n]]
ms = []
for i in range(len(factors)):
f = factors[i]
r = sorted(all_multiplicands(product(factors[:i]+factors[i+1:])))
if r not in res:
res.append(r)
for arr in r:
farr = sorted([f]+arr)
if farr not in ms:
ms.append(farr)
for j in range(len(arr)):
farr = sorted(arr[:j]+[f*arr[j]]+arr[j+1:])
if farr not in ms:
ms.append(farr)
return ms
def is_n_ps_for_k(n, k):
for candidate in all_multiplicands(n):
if len(candidate) > k:
continue #this shouldn't really happen
if(sum(candidate)+k-len(candidate))==n:
return True
return False
limit = 12000
s = set([])
for i in range(2,limit+1):
for j in range(i, 2*i+1):
if is_n_ps_for_k(j, i):
print([i, j])
s.add(j)
break
print(sum(s))
|
DanMayhem/project_euler
|
088.py
|
088.py
|
py
| 2,498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20521018910
|
"""!
@brief Assertions that are used for testing
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
class assertion:
@staticmethod
def eq(argument1, argument2):
if isinstance(argument1, numpy.ndarray) or isinstance(argument2, numpy.ndarray):
if not (argument1 == argument2).all():
raise AssertionError("Expected: '" + str(argument1) + "', Actual: '" + str(argument2) + "'")
elif not (argument1 == argument2):
raise AssertionError("Expected: '" + str(argument1) + "', Actual: '" + str(argument2) + "'")
@staticmethod
def eq_float(argument1, argument2, eps):
if abs(argument1 - argument2) >= eps:
raise AssertionError("Expected: '" + str(argument1) + "', Actual: '" + str(argument2) +
"' (eps: '" + str(eps) + "')")
@staticmethod
def gt(argument1, argument2):
if not (argument1 > argument2):
raise AssertionError("Expected: '" + str(argument1) + "' > '" + str(argument2) +
"', Actual: '" + str(argument1) + "' vs '" + str(argument2) + "'")
@staticmethod
def ge(argument1, argument2):
if not (argument1 >= argument2):
raise AssertionError("Expected: '" + str(argument1) + "' >= '" + str(argument2) +
"', Actual: '" + str(argument1) + "' vs '" + str(argument2) + "'")
@staticmethod
def lt(argument1, argument2):
if not (argument1 < argument2):
raise AssertionError("Expected: '" + str(argument1) + "' < '" + str(argument2) +
"', Actual: '" + str(argument1) + "' vs '" + str(argument2) + "'")
@staticmethod
def le(argument1, argument2):
if not (argument1 <= argument2):
raise AssertionError("Expected: '" + str(argument1) + "' <= '" + str(argument2) +
"', Actual: '" + str(argument1) + "' vs '" + str(argument2) + "'")
@staticmethod
def true(argument1, **kwargs):
message = kwargs.get('message', None)
error_message = "Expected: 'True', Actual: '%s'" % str(argument1)
if message:
error_message = "%s, Info: '%s'" % (error_message, message)
if not argument1:
raise AssertionError(error_message)
@staticmethod
def false(argument1, **kwargs):
message = kwargs.get('message', None)
error_message = "Expected: 'False', Actual: '%s'" % str(argument1)
if message:
error_message = "%s, Info: '%s'" % (error_message, message)
if argument1:
raise AssertionError(error_message)
@staticmethod
def fail(message=None):
if message is None:
raise AssertionError("Failure")
else:
raise AssertionError("Failure: '" + message + "'")
@staticmethod
def exception(expected_exception, callable_object, *args, **kwargs):
try:
callable_object(*args, **kwargs)
except expected_exception:
return
except Exception as actual_exception:
raise AssertionError("Expected: '%s', Actual: '%s'" %
(expected_exception.__name__, actual_exception.__class__.__name__))
raise AssertionError("Expected: '%s', Actual: 'None'" % expected_exception.__name__)
|
annoviko/pyclustering
|
pyclustering/tests/assertion.py
|
assertion.py
|
py
| 3,532 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
2870616186
|
import operator
import struct
from enum import Enum
from collections import defaultdict, namedtuple, deque
import simulatorOps.utils as utils
from simulatorOps.abstractOp import AbstractOp, ExecutionException
class HalfSignedMemOp(AbstractOp):
saveStateKeys = frozenset(("condition",
"imm", "pre", "sign", "byte", "writeback", "mode", "signed",
"basereg", "rd", "offsetImm", "offsetReg"))
def __init__(self):
super().__init__()
self._type = utils.InstrType.memop
def decode(self):
instrInt = self.instrInt
if not (utils.checkMask(instrInt, (7, 4), (27, 26, 25))):
raise ExecutionException("Le bytecode à cette adresse ne correspond à aucune instruction valide",
internalError=False)
# Retrieve the condition field
self._decodeCondition()
# This is the inverse of LDR/STR, if bit 22 is set, then offset IS an immediate value
self.imm = bool(instrInt & (1 << 22))
self.pre = bool(instrInt & (1 << 24))
self.sign = 1 if instrInt & (1 << 23) else -1
self.byte = not bool(instrInt & (1 << 5))
self.signed = bool(instrInt & (1 << 6))
        # See 4.9.1 (with post-indexing, writeback is redundant and always implicitly on)
self.writeback = bool(instrInt & (1 << 21)) or not self.pre
self.mode = "LDR" if instrInt & (1 << 20) else "STR"
self.basereg = (instrInt >> 16) & 0xF
self.rd = (instrInt >> 12) & 0xF
if self.imm:
# The immediate offset is divided in 2 nibbles:
# the 4 LSB are at positions [3, 2, 1, 0]
# the 4 MSB are at positions [11, 10, 9, 8]
            self.offsetImm = (instrInt & 0xF) + ((instrInt >> 4) & 0xF0)  # parentheses required: & binds more loosely than +
else:
# No shift allowed with these instructions
self.offsetReg = instrInt & 0xF
def explain(self, simulatorContext):
self.resetAccessStates()
bank = simulatorContext.regs.mode
simulatorContext.regs.deactivateBreakpoints()
disassembly = self.mode
description = "<ol>\n"
disCond, descCond = self._explainCondition()
description += descCond
disassembly += disCond
self._readregs = utils.registerWithCurrentBank(self.basereg, bank)
addr = baseval = simulatorContext.regs[self.basereg]
description += "<li>Utilise la valeur du registre {} comme adresse de base</li>\n".format(utils.regSuffixWithBank(self.basereg, bank))
descoffset = ""
if self.imm:
addr += self.sign * self.offsetImm
if self.offsetImm > 0:
if self.sign > 0:
descoffset = "<li>Additionne la constante {} à l'adresse de base</li>\n".format(self.offsetImm)
else:
descoffset = "<li>Soustrait la constante {} à l'adresse de base</li>\n".format(self.offsetImm)
else:
regDesc = utils.regSuffixWithBank(self.offsetReg, bank)
if self.sign > 0:
descoffset = "<li>Additionne le registre {} à l'adresse de base</li>\n".format(regDesc)
else:
descoffset = "<li>Soustrait le registre {} à l'adresse de base</li>\n".format(regDesc)
addr += self.sign * simulatorContext.regs[self.offsetReg]
self._readregs |= utils.registerWithCurrentBank(self.offsetReg, bank)
realAddr = addr if self.pre else baseval
sizeaccess = 1 if self.byte else 2
sizedesc = "1 octet" if sizeaccess == 1 else "{} octets".format(sizeaccess)
disassembly += "S" if self.signed else ""
disassembly += "B" if sizeaccess == 1 else "H" if sizeaccess == 2 else ""
disassembly += " R{}, [R{}".format(self.rd, self.basereg)
if self.mode == 'LDR':
if self.pre:
description += descoffset
description += "<li>Lit {} à partir de l'adresse obtenue (pré-incrément) et stocke le résultat dans {} (LDR)</li>\n".format(sizedesc, utils.regSuffixWithBank(self.rd, bank))
else:
description += "<li>Lit {} à partir de l'adresse de base et stocke le résultat dans {} (LDR)</li>\n".format(sizedesc, utils.regSuffixWithBank(self.rd, bank))
description += descoffset
if self.signed:
description += "<li>Copie la valeur du bit {} sur les bits {} à 31 du registre de destination</li>\n".format(7 if self.byte else 15, 8 if self.byte else 16)
self._readmem = set(range(realAddr, realAddr+sizeaccess))
self._writeregs |= utils.registerWithCurrentBank(self.rd, bank)
if self.rd == simulatorContext.PC:
try:
m = simulatorContext.mem.get(realAddr, size=sizeaccess, mayTriggerBkpt=False)
except ExecutionException as ex:
# We do not want to handle user errors here;
# If there is an issue with the memory access, we simply carry on
pass
else:
if m is not None:
res = struct.unpack("<B" if self.byte else "<H", m)[0]
self._nextInstrAddr = res
else: # STR
descRange = " de l'octet le moins significatif" if self.byte else " des 2 octets les moins significatifs"
if self.pre:
description += descoffset
description += "<li>Copie la valeur" + descRange + " registre {} dans la mémoire, à l'adresse obtenue à l'étape précédente (pré-incrément), sur {} (STR)</li>\n".format(utils.regSuffixWithBank(self.rd, bank), sizedesc)
else:
description += "<li>Copie la valeur" + descRange + " registre {} dans la mémoire, à l'adresse de base, sur {} (STR)</li>\n".format(utils.regSuffixWithBank(self.rd, bank), sizedesc)
description += descoffset
self._writemem = set(range(realAddr, realAddr+sizeaccess))
self._readregs |= utils.registerWithCurrentBank(self.rd, bank)
if self.pre:
if self.imm:
if self.offsetImm == 0:
disassembly += "]"
else:
disassembly += ", #{}]".format(hex(self.sign * self.offsetImm))
else:
disassembly += ", R{}".format(self.offsetReg) + "]"
else:
# Post (a post-incrementation of 0 is useless)
disassembly += "]"
if self.imm and self.offsetImm != 0:
disassembly += ", #{}".format(hex(self.sign * self.offsetImm))
elif not self.imm:
disassembly += ", R{}".format(self.offsetReg)
#else:
# Weird case, would happen if we combine post-incrementation and immediate offset of 0
# disassembly += "]"
if self.writeback:
self._writeregs |= utils.registerWithCurrentBank(self.basereg, bank)
description += "<li>Écrit l'adresse effective dans le registre de base {} (mode writeback)</li>\n".format(utils.regSuffixWithBank(self.basereg, bank))
if self.pre:
disassembly += "!"
description += "</ol>"
simulatorContext.regs.reactivateBreakpoints()
return disassembly, description
def execute(self, simulatorContext):
if not self._checkCondition(simulatorContext.regs):
# Nothing to do, instruction not executed
self.countExecConditionFalse += 1
return
self.countExec += 1
addr = baseval = simulatorContext.regs[self.basereg]
if self.imm:
addr += self.sign * self.offsetImm
else:
addr += self.sign * simulatorContext.regs[self.offsetReg]
realAddr = addr if self.pre else baseval
s = 1 if self.byte else 2
if self.mode == 'LDR':
m = simulatorContext.mem.get(realAddr, size=s)
if m is None: # No such address in the mapped memory, we cannot continue
raise ExecutionException("Tentative de lecture de {} octets à partir de l'adresse {} invalide : mémoire non initialisée".format(s, realAddr))
res = struct.unpack("<B" if self.byte else "<H", m)[0]
simulatorContext.regs[self.rd] = res
if self.signed:
simulatorContext.regs[self.rd] |= 0xFFFFFF00 * ((res >> 7) & 1) if self.byte else 0xFFFF0000 * ((res >> 15) & 1)
if self.rd == simulatorContext.PC:
self.pcmodified = True
else: # STR
valWrite = simulatorContext.regs[self.rd]
if self.rd == simulatorContext.PC and simulatorContext.PCSpecialBehavior:
valWrite += 4 # Special case for PC (see ARM datasheet, 4.9.4)
valWrite &= 0xFFFF
simulatorContext.mem.set(realAddr, valWrite, size=1 if self.byte else 2)
if self.writeback:
simulatorContext.regs[self.basereg] = addr
|
mgard/epater
|
simulatorOps/halfSignedMemOp.py
|
halfSignedMemOp.py
|
py
| 9,179 |
python
|
en
|
code
| 35 |
github-code
|
6
|
28116032092
|
import numpy as np
class LinearRegressionDemo:
def __init__(self, learning_rate=1e-3, n_iters=1000):
# init parameters
self.lr = learning_rate
self.n_iters = n_iters
self.weights = None
self.bias = None
def _get_prediction(self, X):
return np.dot(X, self.weights) + self.bias
def _init_params(self):
self.weights = np.zeros(self.n_features)
self.bias = 0
def _update_params(self, dw, db):
self.weights -= self.lr * dw
self.bias -= self.lr * db
def _get_gradients(self, X, y, y_pred):
# get distance between y_pred and y_true
error = y_pred - y
# compute the gradients of weight & bias
dw = (1 / self.n_samples) * np.dot(X.T, error)
db = (1 / self.n_samples) * np.sum(error)
return dw, db
def fit(self, X, y, X_test):
# get number of samples & features
self.n_samples, self.n_features = X.shape
# init weights & bias
self._init_params()
preds = []
test_preds = []
# perform gradient descent for n iterations
for _ in range(self.n_iters):
# get y_prediction
y_pred = self._get_prediction(X)
# compute gradients
dw, db = self._get_gradients(X, y, y_pred)
# update weights & bias with gradients
self._update_params(dw, db)
preds.append(self.predict(X))
test_preds.append(self.predict(X_test))
#test_rmses.append(mean_squared_error(self.predict(X), y, squared=False))
return preds, test_preds
def predict(self, X):
y_pred = self._get_prediction(X)
return y_pred
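# Hedged usage sketch (added): fitting the demo model on a small synthetic regression
# problem; the data, learning rate and iteration count here are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_train = rng.normal(size=(100, 3))
    true_w = np.array([1.5, -2.0, 0.5])
    y_train = X_train @ true_w + 0.7          # linear target with a constant bias of 0.7
    X_test = rng.normal(size=(20, 3))
    model = LinearRegressionDemo(learning_rate=1e-1, n_iters=500)
    train_preds, test_preds = model.fit(X_train, y_train, X_test)
    print("learned weights:", model.weights, "learned bias:", model.bias)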
|
Nishaa95/Intro_to_Machine_Learning_Student_Workbooks
|
linear_reg_demo_grad_desc.py
|
linear_reg_demo_grad_desc.py
|
py
| 1,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6589861302
|
from elasticsearch import Elasticsearch
import pandas as pd
from contexttimer import Timer
es = Elasticsearch(
"http://rgai3.inf.u-szeged.hu:3427/",
basic_auth=("elastic", "V7uek_ey6EdQbGBz_XHX"),
verify_certs=False
)
def get_highlights(csv, es, size):
    # extract the data from the DataFrame, with the already-cleaned questions
df = pd.read_csv(csv,
names=['question', 'context'],
header=1,
encoding='utf-8')
milqa_contexts_dict = dict()
for index, record in df.iterrows():
clean_question = record['question']
context = record['context']
if clean_question not in milqa_contexts_dict:
milqa_contexts_dict[clean_question] = context
else:
pass
with Timer() as t:
result_dict = dict()
error_counter = 0
id = 0
match_len = 0
all_context = list()
all_question = list()
for key, value in milqa_contexts_dict.items():
question = key
official_context = value.split("|||")[1]
# query top 10 guesses
body = {
"size": size,
"query": {
"match": {
"document": question
}
}
}
s = es.search(index='milqa_w_lemma_w_official_context', body=body)
result_contexts = list()
result_official_contexts = list()
for hit in s['hits']['hits']:
result_contexts.append(hit["_source"]["document"])
result_official_contexts.append(hit["_source"]["official_document"])
# error_dict = dict()
result_official_contexts_set = set(single_context for single_context in result_official_contexts)
if official_context in result_official_contexts_set:
match_counter = 1
result_number = 0
for result_official_context in result_official_contexts:
if result_official_context == official_context:
result_number = match_counter
break
else:
match_counter += 1
match_len += 1
all_context.append(value)
all_question.append(key)
else:
error_counter += 1
result_number = 'Nincs benne'
all_context.append(value)
all_question.append(key)
if isinstance(result_number, str):
result_dict[id] = result_number
else:
result_dict[id] = (1 / int(result_number))
id += 1
summary = 0.0
error_counter_check = 0
summary_counter = 0
number: float
for key, number in result_dict.items():
if isinstance(number, float):
summary += number
summary_counter += 1
if isinstance(number, str):
error_counter_check += 1
print("összes eltalát eset " + str(size) + " size mérettel: " + str(summary_counter))
print("összes eset " + str(size) + " size mérettel: " + str(len(milqa_contexts_dict)))
print("összes vizsgált számon kívüli eset " + str(size) + " size mérettel: " + str(error_counter_check))
print("összes eltalált/összes eset (Precision@k): " + str(summary_counter / len(milqa_contexts_dict)))
print("MRR: " + str(summary / len(milqa_contexts_dict)) + " | error counter: " + str(
error_counter)) # + "\n" + str(result_dict))# + "\n" + all_context[2] + "\n" + all_question[2])
print(f"Time spent: {t.elapsed:.2f} seconds")
return 0
if __name__ == '__main__':
csv = 'q_wPoS_wLemma_c_wLemma_c_wOfficial.csv'
# csv = 'q_wLemma_c_wLemma_c_wOfficial.csv'
print(get_highlights(csv, es, 300))
# posLemma: 12769 lemma: 12845
# 1 pos lemma:
# összes eltalát eset 1 size mérettel: 8393
# összes eset 1 size mérettel: 12769
# összes vizsgált számon kívüli eset 1 size mérettel: 4376
# összes eltalált/összes eset (Precision@k): 0.657295011355627
# MRR: 0.657295011355627 | error counter: 4376
# Time spent: 75.06 seconds
#
# 300 pos lemma:
# összes eltalát eset 300 size mérettel: 12559
# összes eset 300 size mérettel: 12769
# összes vizsgált számon kívüli eset 300 size mérettel: 210
# összes eltalált/összes eset (Precision@k): 0.9835539196491503
# MRR: 0.7494510958150116 | error counter: 210
# Time spent: 480.42 seconds
#
# 300 lemma:
# összes eltalát eset 300 size mérettel: 12638
# összes eset 300 size mérettel: 12845
# összes vizsgált számon kívüli eset 300 size mérettel: 207
# összes eltalált/összes eset (Precision@k): 0.9838847800700662
# MRR: 0.7403596956400766 | error counter: 207
# Time spent: 599.05 seconds
#
# 1 lemma
# összes eltalát eset 1 size mérettel: 8315
# összes eset 1 size mérettel: 12845
# összes vizsgált számon kívüli eset 1 size mérettel: 4530
# összes eltalált/összes eset (Precision@k): 0.64733359283768
# MRR: 0.64733359283768 | error counter: 4530
# Time spent: 80.92 seconds
|
szegedai/SHunQA
|
scripts/evals/highlights_score_test_w_preprocessed_questions.py
|
highlights_score_test_w_preprocessed_questions.py
|
py
| 5,241 |
python
|
hu
|
code
| 0 |
github-code
|
6
|
36060772305
|
# #!/usr/bin/env python3
import json
import socket
import sys
from utils.save_json import save_json
def initSocket(ip, port, diretorio):
dir = 'src/json/'+diretorio+'.json'
dicionario = ''
try:
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind((ip, port))
tcp.listen(2)
while True:
con, cliente = tcp.accept()
            print('Conectado por', cliente)
while True:
msg = con.recv(12288)
msg = msg.decode()
dicionario += msg
# dicionario.append(msg)
# json_distribuido = json.loads(dicionario)
if not msg:
save_json(dir, json.loads(dicionario))
dicionario = ''
break
except KeyboardInterrupt:
sys.exit(0)
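# Hedged usage sketch (added): the module defines no entry point, so a typical call might
# look like the following; the host, port and JSON directory name are illustrative
# assumptions, not values taken from the original project.
if __name__ == '__main__':
    initSocket('0.0.0.0', 10051, 'dados_recebidos')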
|
AntonioAldisio/FSE-2022-2-Trabalho-1
|
src/servidor/servidor.py
|
servidor.py
|
py
| 847 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6420520466
|
import time
import datetime
import math
import logging
class Logger():
def __init__(self):
        self.start_time = time.time()
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
print('Starting ' + str(datetime.datetime.now()))
@staticmethod
def printLog(*messages, no_time = False, logging_level='INFO'):
message = Logger.unwrapMessage(messages)
if no_time:
print(message)
else:
print(str(datetime.datetime.now()) + '\t' + message)
@staticmethod
def unwrapMessage(*messages):
message = ''
for m in messages[0]:
message += str(m) + ' '
return message
def getElapsedTime(self):
time_min, str_report = self.calculateElapsedTime()
print(str_report)
return time_min
def calculateElapsedTime(self):
totalSeconds = time.time() - self.start_time
hours = math.floor(totalSeconds / 3600)
minutes = math.floor(totalSeconds / 60 - hours * 60)
seconds = totalSeconds - (hours * 3600 + minutes * 60)
endDate = datetime.datetime.now()
str_report = 'Time: ' + str(endDate)
str_report += '\n' + "--- Total Time: %s hours: %s minutes %s seconds " % (str(hours), str(minutes), str(seconds))
time_min = int((hours * 60 + minutes + seconds /60)*100)/100
return time_min, str_report
|
Script-2020/autoclusteringFinReports
|
util/Logger.py
|
Logger.py
|
py
| 1,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28326166820
|
"""
OCR Pagination
"""
from past.utils import old_div
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from django.conf import settings
from ocr.permission import get_permissions
# -------------------------------------------------------------------------------
# pylint: disable=too-many-ancestors
# pylint: disable=no-member
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-locals
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# -------------------------------------------------------------------------------
class CustomOCRPagination(PageNumberPagination):
"""
OCR Pagination
"""
def __init__(self):
self.query_set = None
self.request = None
self.view = None
self.list_serializer = None
def modified_get_paginate_response(self, page):
"""
Desc:
"""
try:
page_number = int(self.request.query_params.get('page_number', settings.PAGENUMBER))
except ValueError:
page_number = settings.PAGENUMBER
try:
page_size = int(self.request.query_params.get('page_size', settings.PAGESIZE))
except ValueError:
page_size = settings.PAGESIZE
pagination = self.get_page_count(page, page_number, page_size)
permission_details = get_permissions(user=self.request.user,
model=self.list_serializer.Meta.model.__name__.lower(),
type='list')
return Response({
'data': pagination["current_data"],
'total_number_of_pages': pagination['count'],
'current_page': pagination['current_page'],
'current_page_size': pagination['current_page_size'],
'current_item_count': len(pagination["current_data"]),
'total_data_count': pagination['total_data_count'],
'permission_details': permission_details
})
def get_page_count(self, page, page_number=1, page_size=10):
"""
Desc:
"""
if page_size < 1:
page_size = 1
total_data_count = len(page)
if total_data_count < 1:
return {
"count": 0,
"current_page": 0,
"current_page_size": 0,
"total_data_count": total_data_count,
"current_data": []
}
total_number_of_pages = (old_div((total_data_count - 1), page_size)) + 1
if page_number > total_number_of_pages:
page_number = 1
page_size = 10
initial_count = (page_number - 1) * page_size
end_count = initial_count + page_size
page_data = page[initial_count:end_count]
serialized_page_data = self.list_serializer(page_data, many=True,
context={"request": self.request})
data = [i for i in serialized_page_data.data if i]
total_data_count = len(data)
# pylint: disable= line-too-long
return {
"count": total_number_of_pages,
"current_page": page_number,
"current_page_size": page_size,
"current_data": data,
"total_data_count": total_data_count
}
def paginate_queryset(self, queryset, request, view=None, list_serializer=None):
"""
Desc:
"""
self.request = request
self.view = view
self.query_set = queryset
self.list_serializer = list_serializer
return self.query_set
|
Srinidhi-SA/temp_spark
|
SPARK_DOCKER/code/mAdvisor-api/ocr/pagination.py
|
pagination.py
|
py
| 3,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35603813874
|
"""
Duncan Rule, Sally Gao, Yi Hao
"""
class MDPState:
"""State class for a given space in gridworld, with directional attributes pointing to other squares.
Each directional attribute is a tuple of coordinates (x, y). """
def __init__(self, up, down, left, right, reward=-1, value=0):
self.up = up
self.down = down
self.left = left
self.right = right
self.reward = reward
self.value = value
def __str__(self):
return str(self.value)
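# Hedged usage sketch (added): two neighbouring squares of a tiny grid, where each
# directional attribute is a coordinate tuple as described in the docstring; the
# coordinates and the reward value are illustrative assumptions.
if __name__ == "__main__":
    left_square = MDPState(up=(0, 0), down=(0, 0), left=(0, 0), right=(1, 0))
    goal_square = MDPState(up=(1, 0), down=(1, 0), left=(0, 0), right=(1, 0), reward=10)
    print(left_square, goal_square)  # prints each state's value (0 by default)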
|
sally-gao/mazemdp
|
mdpstate.py
|
mdpstate.py
|
py
| 507 |
python
|
en
|
code
| 5 |
github-code
|
6
|
39222512159
|
# %% [markdown]
# We will aggregate data from multiple files.
#
# There are two main ways to summarise multiple data files into a single DataFrame, and both are introduced here.
#
# 1. Build one row of data per person, then combine the multiple rows into one at the end
#    - uses the concat method
# 2. Build one column per required statistic, each collecting the values for all participants, then combine the multiple columns at the end
#    - builds the DataFrame from a dictionary
# %% [markdown]
# ## Build one row of data per person, then combine the multiple rows into one at the end
# %%
# Read each person's data in turn and apply the same processing.
# To read multiple data files into DataFrames one after another,
# we just keep changing the name of the file being read.
# For example, in the case below,
# df = pd.read_csv('data/psysiological_00', index_col='time')
# we would increase the 00 part step by step.
# To do that we embed a variable inside a string, i.e. in
# 'data/psysiological_{}'
# we fill in the {} part appropriately.
# We use what is called an f-string:
# just put an f in front of an ordinary string '',
# and write the variable to embed inside the {}.
id = 12
file_name = f'data/psysiological_{id}'
file_name
# %%
# single-digit ids are slightly awkward
id = 3
file_name = f'data/psysiological_{id}'
file_name
# %%
# zero-pad until the required number of digits is reached,
# using the zfill method.
str(id).zfill(2)
# %%
# the file name when zfill is applied.
file_name = f'data/psysiological_{str(id).zfill(2)}'
file_name
# %%
# change the file name and read the data files in sequence
# import
import pandas as pd
# consecutive reading
for id in range(20):
df = pd.read_csv(f'data/psysiological_{str(id).zfill(2)}')
print(df.head(2)) # show only 2 rows
# %% [markdown]
# The summary-building step is the same for every data file,
# so we just write the summary-building algorithm inside the for block.
#
# As before, suppose we want the mean amplitude between 90 ms and 110 ms on the midline electrodes.
# We also want the average across those midline sites.
# %%
# build a summary for each participant.
centers = ['CPz', 'Pz', 'Oz', 'POz', 'Fz', 'FCz', 'Cz', 'AFz']
for id in range(20):
df = pd.read_csv(f'data/psysiological_{str(id).zfill(2)}')
summary = df.loc[90:110, centers].mean().to_frame().T
summary.insert(0, 'average', [df.loc[90:110, centers].mean().mean()])
print(summary)
# %%
# combine the individual summaries into a single DataFrame.
summaries = []
for id in range(20):
df = pd.read_csv(f'data/psysiological_{str(id).zfill(2)}')
summary = df.loc[90:110, centers].mean().to_frame().T
summary.insert(0, 'average', [df.loc[90:110, centers].mean().mean()])
summaries.append(summary)
summary_all = pd.concat(summaries)
summary_all
# %%
# we want to reset the index.
summary_all = pd.concat(summaries, ignore_index=True)
summary_all
# %%
# save the result.
# the index column is currently unnamed, so give it a name when saving.
summary_all.round(4).to_csv('saves/summary_psysiological.csv', index_label='id')
# %%
# %% [markdown]
# ## Build one column per required statistic for all participants, then combine the multiple columns at the end
# %% [markdown]
# The columns we need are
# 'average', 'CPz', 'Pz', 'Oz', 'POz', 'Fz', 'FCz', 'Cz', 'AFz'.
#
# Each column stores one value per participant, and at the end everything is combined into a DataFrame (a sketch follows in the final cell below).
# %%
# define the data store
columns = ['average', 'CPz', 'Pz', 'Oz', 'POz', 'Fz', 'FCz', 'Cz', 'AFz']
psysiological_data = dict()
for c in columns:
psysiological_data[c] = []
# the above is equivalent to the following code.
# psysiological_data = {col: [] for col in columns}
# show
psysiological_data
# %%
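# Hedged sketch (added, not in the original notebook): fill the per-column store for every
# participant and combine the columns into a single DataFrame, so that the second approach
# ends with the same kind of summary table as the concat approach above.
for id in range(20):
    df = pd.read_csv(f'data/psysiological_{str(id).zfill(2)}')
    window_mean = df.loc[90:110, centers].mean()
    psysiological_data['average'].append(window_mean.mean())
    for c in centers:
        psysiological_data[c].append(window_mean[c])
summary_all_2 = pd.DataFrame(psysiological_data)
summary_all_2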
|
KeiShimon/lecture
|
python-kisoc_day_3/05-01a-複数ファイルの集計.py
|
05-01a-複数ファイルの集計.py
|
py
| 4,108 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
34690028943
|
from django.contrib import admin
from .models import Service, Category, Feature, FeatureItem
class FeatureItemInline(admin.StackedInline):
model = FeatureItem
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ("name", "sub_title")
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name", "title", "body")
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = "name",
search_fields = 'name',
@admin.register(Feature)
class FeatureAdmin(admin.ModelAdmin):
list_display = ('name', 'header_subtitle', 'slug', 'features_list_title')
list_editable = ('header_subtitle', 'slug', 'features_list_title')
prepopulated_fields = {'slug': ('name', )}
search_fields = 'name', 'header_subtitle', 'header_description', 'body'
inlines = [FeatureItemInline]
|
samshultz/techbitsdata
|
services/admin.py
|
admin.py
|
py
| 855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31543778104
|
marks = [[0 for j in range(30)] for i in range(3)]
max_stud = [0,0,0]
for i in range(3):
max_stud[i] = int(input(f"\n\tEnter Maximum students in class {i+1} : "))
print()
if not 0 < max_stud[i] < 31:
print("\tStudents in a class can only be between 1 to 30")
exit(0)
for j in range(max_stud[i]):
marks[i][j] = int(input(f"\tEnter marks of student {j+1} of class {i+1} : "))
for i in range(3):
for j in range(max_stud[i]):
print(f"\tMarks of Student {j+1} of class {i+1} is : {marks[i][j]}")
|
Shobhit0109/programing
|
EveryOther/Practical File/python/P8.py
|
P8.py
|
py
| 554 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1790565048
|
import pdfplumber
import pandas as pd
from babel.numbers import format_currency
def extrair_tabelas(pdf_path):
with pdfplumber.open(pdf_path) as pdf:
        # initialise a list to store all of the tables
todas_tabelas = []
        # iterate over every page of the PDF
for pagina in pdf.pages:
            # extract the tables from the current page
tabelas_pagina = pagina.extract_tables()
            # add those tables to the list
todas_tabelas.extend(tabelas_pagina)
        # return the list of tables
return todas_tabelas
# replace 'caminho_para_seu_pdf.pdf' with the actual path of your PDF file
caminho_pdf = 'fatura-pdf/xp3.pdf'
tabelas = extrair_tabelas(caminho_pdf)
gastos = []
# print the tables
for i, tabela in enumerate(tabelas):
    # check that the table was extracted correctly and contains the string "Data"
if tabela and any("Data" in row for row in tabela):
df = pd.DataFrame(tabela[1:], columns=tabela[0])
        # print the DataFrame row by row, column by column
for indice, linha in df.iterrows():
            # check whether the first column is in date format (22/10/23)
if pd.to_datetime(linha.iloc[0], errors='coerce', dayfirst=True, format='%d/%m/%y') and pd.notna(pd.to_datetime(linha.iloc[0], errors='coerce', dayfirst=True, format='%d/%m/%y')):
gastos.append([linha.iloc[0], linha.iloc[1], linha.iloc[2]])
# print the list of expenses in a more readable form
total = 0.0
for i, gasto in enumerate(gastos):
if "Pagamentos Validos Normais" not in gasto[1]:
print(f"{gasto[0]} {gasto[1]} {gasto[2]}")
valor = gasto[2].replace('.', '').replace(',', '.')
total += float(valor)
# format the invoice total in Brazilian reais (R$) using babel
total_formatado = format_currency(total, 'BRL', locale='pt_BR')
print("Total da fatura:", total_formatado)
|
regis-amaral/python-scripts
|
fatura-pdf/reader-fatura.py
|
reader-fatura.py
|
py
| 1,927 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
18797455208
|
# -*- coding:utf-8 -*-
import urllib.request
import urllib.parse
# fetch page data with a GET request
def geturl(url, data={}, headers={}):
try:
params = urllib.parse.urlencode(data)
req = urllib.request.Request("%s?%s" % (url, params))
        # set the headers
for i in headers:
req.add_header(i, headers[i])
r = urllib.request.urlopen(req)
html = r.read()
return html.decode("utf8")
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf8"))
def posturl(url, data={}, headers={}):
try:
params = urllib.parse.urlencode(data).encode("utf8")
req = urllib.request.Request(url, params, headers)
r = urllib.request.urlopen(req)
html = r.read()
return html.decode("utf8")
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf8"))
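# Hedged usage sketch (added): a simple GET request; the URL and parameters here are
# illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    html = geturl("https://httpbin.org/get", data={"q": "demo"},
                  headers={"User-Agent": "httputil-demo"})
    print(html)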
|
yunmenzhe/HttpInterfaceAutoTest
|
utils/httputil.py
|
httputil.py
|
py
| 906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7227006625
|
import tqdm
import argparse
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src-set', '-src-set', type=str, default=r'/home/v-jiaya/RetrieveNMT/data/MD/en-de/iwslt14-en-de/train/train.en',help='source file')
parser.add_argument('--new-src-set', '-new-tgt-set', type=str, default=r'/home/v-jiaya/fast_align/data/test.en-de',help='source file')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
with open(args.src_set,"r", encoding="utf-8") as src_r:
with open(args.new_src_set, "w", encoding="utf-8") as new_src_w:
print("reading source data file: {}".format(args.src_set))
src_lines = src_r.readlines()
for line_id, src_line in tqdm.tqdm(enumerate(src_lines)):
src_line=src_line.strip()
concat_lines = src_line.split(" [APPEND] ")[1].split(" [SRC] ")
for item in concat_lines:
src, tgt = item.split(" [TGT] ")
new_src_w.write("{} ||| {}\n".format(src, tgt))
new_src_w.flush()
|
CSJianYang/RetrieveNMT
|
RetrieveNMT/SMT/generate_align_data.py
|
generate_align_data.py
|
py
| 1,178 |
python
|
en
|
code
| 3 |
github-code
|
6
|
32069589714
|
# I completed the task, but in a "roundabout" way - by flattening the list in __init__.
# I understand the task is supposed to be solved with recursion, but I could not get it to work.
# In the first implementation I extended the logic from task 1, but because of the return
# only the first element of the nested list is returned.
# The second implementation is a lighter class that leans on recursion, but it fails
# for the same reason.
# Could you advise how to get around the return in these functions, so that the loop does not
# stop at that first return? (One possible answer is sketched right below.)
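# Hedged sketch (added): one way around the "stops at the first return" problem described
# above is to let a generator do the recursion -- `yield from` re-emits every element of the
# nested call instead of returning once, so __iter__ can simply delegate to it.
class FlatIteratorGenerator:
    def __init__(self, list_of_list):
        self.list_of_list = list_of_list

    def __iter__(self):
        return self._flatten(self.list_of_list)

    def _flatten(self, items):
        for el in items:
            if isinstance(el, list):
                # recurse into the nested list and re-yield all of its elements
                yield from self._flatten(el)
            else:
                yield el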
class FlatIterator:
def __init__(self, list_of_list):
self.list_of_list = self.zart_flatten(list_of_list)
self.n = len(self.list_of_list)
def zart_flatten(self, a):
"""
Non recursive algorithm
Based on pop from old and append elements to new list
"""
queue, out = [a], []
while queue:
elem = queue.pop(-1)
if isinstance(elem, list):
queue.extend(elem)
else:
out.append(elem)
return out[::-1]
def __iter__(self):
self.cursor = -1
return self
def __next__(self):
self.cursor += 1
if self.cursor >= self.n:
raise StopIteration
if isinstance(self.list_of_list[self.cursor], list):
с = FlatIterator(self.list_of_list[self.cursor])
for el in с:
return el
else:
return self.list_of_list[self.cursor]
# class FlatIterator:
# def __init__(self, list_of_list):
# self.list_of_list = list_of_list
# self.n = len(self.list_of_list)
# def __iter__(self):
# self.cursor_outer = 0
# self.cursor_inner = -1
# return self
# def __next__(self):
# self.cursor_inner += 1
# if self.cursor_inner >= len(self.list_of_list[self.cursor_outer]) or self.list_of_list[self.cursor_outer][self.cursor_inner] == []:
# self.cursor_outer += 1
# self.cursor_inner = 0
# if self.cursor_outer >= self.n:
# raise StopIteration
# if isinstance(self.list_of_list[self.cursor_outer][self.cursor_inner], list) and self.list_of_list[self.cursor_outer][self.cursor_inner]:
# for el in FlatIterator(self.list_of_list[self.cursor_outer][self.cursor_inner]):
# return el
# else:
# return self.list_of_list[self.cursor_outer][self.cursor_inner]
# class FlatIterator:
# def __init__(self, list_of_list):
# self.list_of_list = list_of_list
# self.n = len(self.list_of_list)
# def __iter__(self):
# self.cursor = -1
# return self
# def __next__(self):
# self.cursor += 1
# if self.cursor >= self.n:
# raise StopIteration
# if isinstance(self.list_of_list[self.cursor], list):
# с = FlatIterator(self.list_of_list[self.cursor])
# for el in с:
# return el
# else:
# return self.list_of_list[self.cursor]
def test_3():
list_of_lists_2 = [
[['a'], ['b', 'c']],
['d', 'e', [['f'], 'h'], False],
[1, 2, None, [[[[['!']]]]], []]
]
for flat_iterator_item, check_item in zip(
FlatIterator(list_of_lists_2),
['a', 'b', 'c', 'd', 'e', 'f', 'h', False, 1, 2, None, '!']
):
assert flat_iterator_item == check_item
assert list(FlatIterator(list_of_lists_2)) == ['a', 'b', 'c', 'd', 'e', 'f', 'h', False, 1, 2, None, '!']
if __name__ == '__main__':
test_3()
# list_of_lists_2 = [
# [['a'], ['b', 'c']],
# ['d', 'e', [['f'], 'h'], False],
# [1, 2, None, [[[[['!']]]]], []]
# ]
# for el in FlatIterator(list_of_lists_2):
# print(el)
|
sokkos1995/PYDA
|
module5_adanced_python/hw4/task3.py
|
task3.py
|
py
| 4,187 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
17689672862
|
import torch
from torch.nn import Module, Conv2d, LeakyReLU, PReLU, BatchNorm2d, Sequential, PixelShuffle, AdaptiveAvgPool2d, Flatten, Linear, Dropout2d, Dropout
class ResidualUnit(Module):
def __init__(self):
super(ResidualUnit, self).__init__()
self.conv1 = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64), PReLU(64))
self.conv2 = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64))
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
return input + out
class UpsampleUnit(Module):
def __init__(self):
super(UpsampleUnit, self).__init__()
self.conv = Conv2d(64, 256, 3, 1, "same")
self.shuffle = PixelShuffle(2)
self.activation = PReLU(64)
def forward(self, X):
return self.activation(self.shuffle(self.conv(X)))
class Generator(Module):
def __init__(self, no_resBlocks):
super(Generator, self).__init__()
        self.residuals = Sequential(*[ResidualUnit() for _ in range(no_resBlocks)])  # build distinct residual units rather than repeating one shared instance
self.upsample = Sequential(UpsampleUnit(), UpsampleUnit())
self.initialConv = Sequential(Conv2d(3, 64, 9, 1, "same"), PReLU(64))
self.midConv = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64))
self.finalConv = Conv2d(64, 3, 9, 1, "same")
def forward(self, input):
input = self.initialConv(input)
out = self.residuals(input)
out = self.midConv(out)
out = out + input
out = self.upsample(out)
out = self.finalConv(out)
return torch.tanh(out)
class DiscConvBlock(Module):
def __init__(self, in_channels, out_channels, stride):
super(DiscConvBlock, self).__init__()
self.conv = Conv2d(in_channels, out_channels, 3, stride, 1)
self.bn = BatchNorm2d(out_channels)
self.activation = LeakyReLU(0.2)
self.dropout = Dropout2d(p=0.50)
def forward(self, X):
return self.dropout(self.activation(self.bn(self.conv(X))))
class Discriminator(Module):
def __init__(self):
super(Discriminator, self).__init__()
self.initial_conv = Sequential(
Conv2d(3, 64, 3, 1, "same"),
LeakyReLU(0.2),
Dropout2d(p=0.5)
)
self.conv_seq = Sequential(
DiscConvBlock(64, 64, 2),
DiscConvBlock(64, 128, 1),
DiscConvBlock(128, 128, 2),
DiscConvBlock(128, 256, 1),
DiscConvBlock(256, 256, 2),
DiscConvBlock(256, 512, 1),
DiscConvBlock(512, 512, 2),
AdaptiveAvgPool2d(1),
Flatten()
)
self.fc = Sequential(
Linear(512, 1024),
LeakyReLU(0.2),
Dropout(0.50),
Linear(1024, 1)
)
def forward(self, X):
return self.fc(self.conv_seq(self.initial_conv(X)))
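# Hedged usage sketch (added): a shape check only; the batch size, resolution and number of
# residual blocks are illustrative assumptions (padding="same" requires PyTorch >= 1.9).
if __name__ == "__main__":
    gen = Generator(no_resBlocks=16)
    disc = Discriminator()
    lr_batch = torch.randn(1, 3, 24, 24)   # low-resolution input
    sr_batch = gen(lr_batch)               # upscaled 4x by the two PixelShuffle(2) stages -> (1, 3, 96, 96)
    score = disc(sr_batch)                 # single real/fake logit per image -> (1, 1)
    print(sr_batch.shape, score.shape)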
|
abed11326/Training-a-Super-Resolution-GAN-for-4x-image-upscaling
|
models.py
|
models.py
|
py
| 2,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35007925134
|
from src.main.python.Solution import Solution
# Follow up for "Remove Duplicates":
# What if duplicates are allowed at most twice?
#
# For example,
# Given sorted array nums = [1,1,1,2,2,3],
#
# Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3.
# It doesn't matter what you leave beyond the new length.
class Q080(Solution):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
len, cnt = 0, 0
if nums:
for i, item in enumerate(nums):
if not i or item != nums[i-1]:
nums[len] = item
len += 1
cnt = 1
else:
cnt += 1
if cnt <= 2:
nums[len] = item
len += 1
return len
|
renkeji/leetcode
|
python/src/main/python/Q080.py
|
Q080.py
|
py
| 904 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12100194486
|
import unittest
import itertools
from functools import partial
from bst import BST
def _factory(l):
_b = BST(l[0])
for item in l[1:]:
_b.insert(item)
return _b
class TestBST(unittest.TestCase):
def _check_node(self, node, item, left_child, right_child):
self.assertEqual(item, node.item)
if left_child is None:
self.assertIsNone(node.left)
else:
self.assertEqual(left_child, node.left.item)
if right_child is None:
self.assertIsNone(node.right)
else:
self.assertEqual(right_child, node.right.item)
def test_sanity(self):
# Act
b = BST(1)
# Assert
self.assertEqual(1, b.item)
self.assertEqual(None, b.left)
self.assertEqual(None, b.right)
def test_insert_1(self):
# Arrange + Act
b = BST(1)
b.insert(2)
b.insert(3)
# Assert
self._check_node(b, 1, None, 2)
self._check_node(b.right, 2, None, 3)
self._check_node(b.right.right, 3, None, None)
def test_insert_2(self):
# Arrange + Act
b = BST(1)
b.insert(3)
b.insert(2)
# Assert
self._check_node(b, 1, None, 3)
self._check_node(b.right, 3, 2, None)
self._check_node(b.right.left, 2, None, None)
def test_insert_3(self):
# Arrange + Act
b = BST(2)
b.insert(1)
b.insert(3)
# Assert
self._check_node(b, 2, 1, 3)
self._check_node(b.left, 1, None, None)
self._check_node(b.right, 3, None, None)
def test_insert_4(self):
# Arrange + Act
b = BST(2)
b.insert(3)
b.insert(1)
# Assert
self._check_node(b, 2, 1, 3)
self._check_node(b.left, 1, None, None)
self._check_node(b.right, 3, None, None)
def test_insert_5(self):
# Arrange + Act
b = BST(3)
b.insert(1)
b.insert(2)
# Assert
self._check_node(b, 3, 1, None)
self._check_node(b.left, 1, None, 2)
self._check_node(b.left.right, 2, None, None)
def test_insert_6(self):
# Arrange + Act
b = BST(3)
b.insert(2)
b.insert(1)
# Assert
self._check_node(b, 3, 2, None)
self._check_node(b.left, 2, 1, None)
self._check_node(b.left.left, 1, None, None)
# Now that we tested insert(), we can use _factory!
def test__eq__when_equal(self):
# Arrange
b1 = _factory([2,3,1])
b2 = _factory([2,1,3])
#Assert
self.assertEqual(b1, b2)
def test__eq__when_not_equal(self):
# Arrange
l1 = _factory([1,2,3])
l2 = _factory([1,3,2])
#Assert
self.assertFalse(l1 == l2)
def test_search_contains_return_list(self):
# Arrange
b = _factory([1,3,2,4])
expected = _factory([3,4,2])
# Act
actual = b.search(3)
# Assert
self.assertEqual(expected, actual)
def test_search_not_contains_return_None(self):
# Arrange
b = _factory([1,2,3,4,5])
# Act + Assert
self.assertIsNone(b.search(6))
def test_min_max(self):
for perm in itertools.permutations(range(5)):
self.assertEqual(0, _factory(perm).min().item)
self.assertEqual(4, _factory(perm).max().item)
def test_inorder(self):
b = _factory([2,1,3])
self.assertEqual([1,2,3], b.inorder())
def test_preorder(self):
b = _factory([2,1,3])
self.assertEqual([2,1,3], b.preorder())
    def test_postorder(self):
b = _factory([2,1,3])
self.assertEqual([1,3,2], b.postorder())
def test_search_parent(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([4,3,6,5]), b._search_parent(6))
self.assertEqual(_factory([4,3,6,5]), b._search_parent(3))
def test_delete_delete_childess_node(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,4,6,5]), b.delete(3))
def test_delete_delete_node_with_one_child_left(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,4,3,5]), b.delete(6))
def test_delete_delete_node_with_one_child_right(self):
b = _factory([2,1,7,8,4,3,6,5,9])
self.assertEqual(_factory([2,1,7,4,3,6,5,9]), b.delete(8))
def test_delete_delete_node_with_two_children(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,5,3,6]), b.delete(4))
def test_delete_delete_node_with_two_children_min_with_right_subtree(self):
b = _factory([2,1,7,8,4,3,6,5,5.8,5.7,5.9])
self.assertEqual(_factory([2,1,7,8,5,3,6,5.8,5.7,5.9]), b.delete(4))
def test__repr__(self):
# Arrange
b = _factory([1,3,2])
# Act + Assert
self.assertEqual('BST(item=1, left=None, right=BST(item=3, left=BST(item=2, left=None, right=None), right=None))', b.__repr__())
if __name__ == '__main__':
unittest.main()
|
Shaywei/MyDevTools
|
Python/BasicDataStructures/bst_tests.py
|
bst_tests.py
|
py
| 5,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38949670839
|
class Solution:
# @param {string} s A string
# @return {boolean} whether the string is a valid parentheses
def isValidParentheses(self, s):
stack = []
        pairs = {')': '(', '}': '{', ']': '['}
        for ch in s:
            if ch in pairs.values():
                stack.append(ch)
            elif ch in pairs.keys():
                if stack == [] or pairs[ch] != stack.pop():
return False
else:
return False
return stack == []
|
sublingbling/coding-everyday
|
10_26_2016_valid-parentheses/chang.py
|
chang.py
|
py
| 523 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36822122944
|
import random
from turtle import Turtle, Screen
class Blocks(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.shape("square")
self.turtlesize(stretch_len=3, stretch_wid=1)
self.goto(x=-450, y=0)
self.row_num = {1: 0, 2: 25, 3: 50, 4: 75, 5: 100, 6: 125, 7: 150, 8: 175, 9: 200, 10: 225}
self.row_colors = {1: "#FF0000", 2: "#FF3EC0", 3: "#00FF00", 4: "#FFFF33", 5: "#FF8000",
6: "#00FFFF", 7: "#CC0000", 8: "#0000CC", 9: "#990099", 10: "#CCFFFF"}
self.all_blocks = []
def create_row(self, row):
last_block_x = [-550]
for block in range(10):
block = Blocks()
block.penup()
block.color(self.row_colors[row])
block.sety(self.row_num[row])
block.setx(last_block_x[-1] + 99)
last_block_x.append(block.xcor())
self.all_blocks.append(block)
def destroy_block(self, block):
self.all_blocks.pop(self.all_blocks.index(block))
block.color("black")
block.speed("fastest")
block.goto(x=1000, y=1000)
def create_level(self, level):
for row in range(level):
rand_row = random.randint(1, 10)
self.create_row(rand_row)
|
guitarkeegan/breakout-game
|
blocks.py
|
blocks.py
|
py
| 1,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69895141309
|
# -*- encoding: utf-8 -*-
"""
lunaport.domain.line
~~~~~~~~~~~~~~~~~~~~
Line related business logic
"""
import string
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from base import BaseFactory, BaseAdaptor, BaseEntrie
class Line(BaseEntrie):
"""
Line(power line or queue) - district of datacenter.
"""
attr_required = [
'id',
'name',
'dc',
]
attr_optional = []
#'dc',
#]
class LineAdaptor(BaseAdaptor):
__just_inherit = True
class LineBuilder(BaseFactory):
""" Line instance static fabric.
"""
target_struct = Line
req_attr_allowed = [
'id',
'name',
]
req_attr_allowed_set = set(req_attr_allowed)
@classmethod
def from_Flask_req(cls, r, session):
""" Creates class instance from Flask request object.
Args:
r: Flask request object.
session: Flask session object.
Returns:
                Line class instance.
"""
strip_dc = lambda name: ''.join(
[el for el in name if el in string.ascii_lowercase])
msg_rvd = cls.parse_flask_req(r, session)
msg_rvd.update({
'dc': {'name': strip_dc(msg_rvd['name'])}
})
return Line(**msg_rvd)
@classmethod
def from_row(cls, **row):
"""Creates class instance from RDBMS returned row.
Args:
row: dict with table columns as keys.
Returns:
*Line* class instance.
"""
if row.get('dc_id'):
row.setdefault('dc', {})
row['dc']['id'] = row.get('dc_id')
del row['dc_id']
if row.get('dc_name'):
row.setdefault('dc', {})
row['dc']['name'] = row.get('dc_name')
del row['dc_name']
return Line(**row)
|
greggyNapalm/lunaport_server
|
lunaport_server/domain/line.py
|
line.py
|
py
| 1,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33262623475
|
"""
1) Create a text field.
2) Ask the user to enter an arbitrary string in the console.
3) Display that string in the text field of the window.
Note: requesting the string and displaying it in the text field must happen before mainloop().
"""
from tkinter import *
def setWindow(root):
    root.title("Окно программы")  # set the window title
    root.resizable(False, False)  # disallow resizing the window horizontally and vertically
    w = 800  # window width
    h = 600  # window height
    ws = root.winfo_screenwidth()  # screen width in pixels
    wh = root.winfo_screenheight()  # screen height in pixels
    x = int(ws / 2 - w / 2)  # x coordinate that centres the window horizontally
    y = int(wh / 2 - h / 2)  # y coordinate that centres the window vertically
    root.geometry("{0}x{1}+{2}+{3}".format(w, h, x, y))  # place the window in the centre of the screen
root = Tk()  # create the root window object
setWindow(root)
user_text = input("Введите строку: ")
entry = Entry(root, font="Tahoma 18", bg="#ABFF99", fg="Red", bd=4)  # create text field 1
entry.insert(END, user_text)
entry.pack()
root.mainloop()  # enter the window's main event loop
|
kuzbassghost/Course
|
BaseRus/GUI/Homework_4.py
|
Homework_4.py
|
py
| 1,674 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
20855466611
|
import requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
import os
from decouple import config
from prettyprinter import pprint
import GUI
# For the API documentation go to
# https://coinmarketcap.com/api/documentation/v1/#section/Quick-Start-Guide
API_KEY = config("API_KEY")
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
'start': '1',
'limit': '50',
'convert': 'USD',
"CMC_PRO_API_KEY": API_KEY,
}
my_portfolio = [
{
"symbol": "BTC",
"amount_owned": 2000,
"price_payed_per_unit": 20.0
},
{
"symbol": "ETH",
"amount_owned": 500,
"price_payed_per_unit": 2.0
},
{
"symbol": "XPR",
"amount_owned": 1500,
"price_payed_per_unit": 0.1
},
{
"symbol": "XLM",
"amount_owned": 2000,
"price_payed_per_unit": 0.2
},
{
"symbol": "EOS",
"amount_owned": 1000,
"price_payed_per_unit": 2.0
},
]
def get_api_info():
api_request = requests.get(url=url, params=parameters)
data = json.loads(api_request.content)
return data["data"]
def add_coin_msg_to_gui(coin_msg):
coin_frame = GUI.Frame(GUI.second_frame,
relief='ridge',
borderwidth=2,
bg='#F2F2F2')
coin_frame.pack(
anchor='center',
pady=(10, 0),
padx=(50, 50),
fill='x'
)
display_coin_msg = GUI.Label(coin_frame,
text=coin_msg,
anchor='w',
                                 font=('Times New Roman', '13', 'bold underline'),
bg='#F2F2F2')
display_coin_msg.pack(fill='x',
padx=10)
def show_portfolio_profit_loss_on_gui(profit_loos):
    # show the portfolio's total profit/loss at the bottom of the window
if profit_loos > 0:
color = "green"
else:
color = "red"
portfolio_profit_loos_label = GUI.Label(GUI.root,
text="Portfolio Total Profit/Loos: ${0:.2f}".format(profit_loos),
font=('Time New Roman', '9', 'bold'),
bg="white",
fg=color)
portfolio_profit_loos_label.pack(pady=(5, 0),
anchor='e')
def format_data():
try:
coins = get_api_info()
portfolio_profit_loos = 0
for coin in coins:
for sym in my_portfolio:
if sym["symbol"] == coin["symbol"]:
total_paid = sym["price_payed_per_unit"] * sym["amount_owned"]
total_current_value = sym["amount_owned"] * float(coin["quote"]["USD"]["price"])
profit = total_current_value - total_paid
profit_percentage = profit / total_paid * 100
profit_per_coin = float(coin["quote"]["USD"]["price"]) - sym["price_payed_per_unit"]
my_coin_msg = f'Name: {coin["name"]} \n' \
f'Symbol: {coin["symbol"]} \n' \
f'Rank: {coin["cmc_rank"]} \n' \
f'Current Price: ${float(coin["quote"]["USD"]["price"]):.2f} \n' \
f'24 Hour Change: {float(coin["quote"]["USD"]["percent_change_24h"]):.2f}% \n' \
f'Paid per coin: ${sym["price_payed_per_unit"]:.2f} \n' \
f'Amount Owned: {sym["amount_owned"]} units \n' \
f'Total current value: ${total_current_value:.2f} \n' \
f'Total Paid: ${total_paid:.2f} \n' \
f'Profit/Loss per coin:${profit_per_coin:.2f} \n' \
f'Profit/Loss: ${profit:.2f} \n' \
f'Profit/Loss percentage: {profit_percentage:.2f}%'
portfolio_profit_loos += profit
add_coin_msg_to_gui(my_coin_msg)
show_portfolio_profit_loss_on_gui(portfolio_profit_loos)
except (ConnectionError, Timeout, TooManyRedirects) as e:
pprint(e)
if __name__ == "__main__":
# Clear command line window
os.system('cls')
format_data()
GUI.root.mainloop()
|
edumarg/cyrpto_currency_portfolio
|
main.py
|
main.py
|
py
| 4,492 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27773506390
|
import os
import asyncio
from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message, ChatPermissions
from telepyrobot import COMMAND_HAND_LER
from telepyrobot.utils.admin_check import admin_check
__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))
__help__ = f"""
Commands to help you manage a chat.
`{COMMAND_HAND_LER}leavechat`: Exit from the Group.
Usage: {COMMAND_HAND_LER}leavechat
`{COMMAND_HAND_LER}invitelink`: Gives the invitelink of the Group.
Usage: {COMMAND_HAND_LER}invitelink
`{COMMAND_HAND_LER}setchatpic`: Changes the Picture of Group.
Usage: {COMMAND_HAND_LER}setchatpic (as a reply to the message)
`{COMMAND_HAND_LER}delchatpic`: Removes the Picture of Group.
Usage: {COMMAND_HAND_LER}delchatpic (as a reply to the message)
`{COMMAND_HAND_LER}setchatname`: Renames the Group.
Usage: {COMMAND_HAND_LER}setchatname (chatname or as a reply to the message)
`{COMMAND_HAND_LER}setchatdesc`: Sets the Description of the Group.
Usage: {COMMAND_HAND_LER}setchatdesc (chatdesc or as a reply to the message)
"""
@TelePyroBot.on_message(filters.command("leavechat", COMMAND_HAND_LER) & filters.me)
async def leavechat(c: TelePyroBot, m: Message):
if m.chat.type in ["group", "supergroup"]:
chat_id = m.chat.id
is_admin = await admin_check(c, m)
if not is_admin:
return
await c.leave_chat(chat_id, delete=True)
return
@TelePyroBot.on_message(filters.command("invitelink", COMMAND_HAND_LER) & filters.me)
async def invitelink(c: TelePyroBot, m: Message):
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
link = await c.export_chat_invite_link(chat_id)
await m.edit_text(f"**Link for Chat:**\n`{link}`")
return
@TelePyroBot.on_message(filters.command("setchatpic", COMMAND_HAND_LER) & filters.me)
async def set_picture(c: TelePyroBot, m: Message):
if m.chat.type in ["group", "supergroup"]:
is_admin = await admin_check(c, m)
if not is_admin:
return
await m.edit_text("`Tring to Change Group Picture....`")
chat_id = m.chat.id
try:
if m.reply_to_message and m.reply_to_message.media:
file_id = m.reply_to_message.photo.file_id
file_ref = m.reply_to_message.photo.file_ref
await c.set_chat_photo(chat_id, file_id, file_ref=file_ref)
await m.edit_text(f"`{m.chat.type.title()} picture has been set.`")
else:
await m.edit_text("`Reply to an image to set that as group pic`")
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Pic due to:**\n`{ef}`")
return
@TelePyroBot.on_message(filters.command("delchatpic", COMMAND_HAND_LER) & filters.me)
async def delchatpic(c: TelePyroBot, m: Message):
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
try:
await c.delete_chat_photo(chat_id)
await m.edit_text(f"`Deleted Chat Picture for {m.chat.title}`")
except Exception as ef:
await m.edit_text(f"Error deleting Chat Pic due to:\n`{ef}`")
@TelePyroBot.on_message(filters.command("setchatname", COMMAND_HAND_LER) & filters.me)
async def setchatname(c: TelePyroBot, m: Message):
await m.edit_text("__Trying to Change Chat Name!__")
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
chat_title = m.text.split(None, 1)
if m.reply_to_message:
chat_title = m.reply_to_message.text
else:
chat_title = chat_title[1]
try:
await c.set_chat_title(chat_id, chat_title)
await m.edit_text(f"<b>Changed Chat Name to:</b> <code>{chat_title}</code>")
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Title due to:**\n`{ef}`")
@TelePyroBot.on_message(filters.command("setchatdesc", COMMAND_HAND_LER) & filters.me)
async def setchatdesc(c: TelePyroBot, m: Message):
await m.edit_text("__Trying to Change Chat Desciption!__")
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
chat_desc = m.text.split(None, 1)
if m.reply_to_message:
chat_desc = m.reply_to_message.text
else:
chat_desc = chat_desc[1]
try:
await c.set_chat_description(chat_id, chat_desc)
await m.edit_text(
f"<b>Changed Chat Description to:</b> <code>{chat_desc}</code>"
)
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Desciption due to:**\n`{ef}`")
|
Divkix/TelePyroBot
|
telepyrobot/plugins/chat.py
|
chat.py
|
py
| 4,652 |
python
|
en
|
code
| 40 |
github-code
|
6
|
34351667294
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('CCLhora2.csv')
data1 = pd.read_csv('CCLdia2.csv')
data2 = pd.read_csv('CCLsemana2.csv')
data3 = pd.read_csv('CCLmes2.csv')
data4 = pd.read_csv('CCLaño2.csv')
data["date"] = pd.to_datetime(data["date"], unit='ms')
data1["date"] = pd.to_datetime(data1["date"], unit='ms')
data2["date"] = pd.to_datetime(data2["date"], unit='ms')
data3["date"] = pd.to_datetime(data3["date"], unit='ms')
data4["date"] = pd.to_datetime(data4["date"], unit='ms')
plt.rcParams["figure.figsize"] = (12,12)
plt.plot(data['date'], data['tamaño'])
plt.plot(data1['date'], data1['tamaño'])
plt.plot(data2['date'], data2['tamaño'])
plt.plot(data3['date'], data3['tamaño'])
plt.plot(data4['date'], data4['tamaño'])
plt.ylabel('Weight of the LCC-')
plt.xlabel('Time')
plt.savefig('fgtodosc2.png')
|
pedrolf8/MastodonTFG
|
paso.py
|
paso.py
|
py
| 869 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74087520829
|
# -*- coding: utf-8 -*-
import re
import datetime
import bs4
import scrapy
from scrapy_wssc.Item.BookContentItem import BookContentItem
from scrapy_wssc.Item.BookItem import BookItem
class mobile_spider(scrapy.Spider):
name = 'mobile_spider'
def __init__(self, bid=None):
"""初始化起始页面和游戏bid
"""
super(mobile_spider, self).__init__()
        self.bid = bid  # the bid argument is passed in here
        self.start_urls = ['https://m.qu.la/wapsort/4_1.html']  # historical novels, category 1
        # 'https://www.qu.la/xuanhuanxiaoshuo/',  # fantasy novels, category 2
        # 'https://www.qu.la/dushixiaoshuo/']  # urban novels, category 3
self.allowed_domain = 'm.qu.la'
#self.driver = webdriver.Chrome(
# executable_path="C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe")
#self.driver.set_page_load_timeout(10) # throw a TimeoutException when the page load time exceeds 10 seconds.
#self.bookService = BookServiceImpl()
def parse(self, response):
pattern = re.compile(r'\d+')
book_list = response.xpath('//div[@class="recommend"]/div[@id="main"]/div')
for li in book_list:
bookItem = BookItem();
bookItem['id'] = pattern.search(li.xpath('a/@href').extract()[0]).group()
bookItem['cateId'] = 1
bookItem['name'] = li.xpath('a/p[@class="title"]/text()').extract()[0].strip()
bookItem['author'] = li.xpath('a/p[@class="author"]/text()').extract()[0].split(u':')[1]
bookItem['isHot'] = True
bookItem['isSerial'] = True
bookItem['status'] = 1
bookItem['lastUpdate'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
bookItem['describe'] = li.xpath('p[@class="review"]/text()').extract()[1].split(u':')[1].strip()
bookItem['bookUrl'] = 'https://m.qu.la'+li.xpath('a/@href').extract()[0]
bookItem['create_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookItem
# Crawl the chapters of each book
yield scrapy.Request(url='https://m.qu.la/booklist/'+bookItem['id']+'.html',callback=self.get_book_chapter_list, meta={"cateId":1})
def get_book_chapter_list(self,response):
soup = bs4.BeautifulSoup(response.text, 'lxml')
chapterList = soup.find('div',id="chapterlist").p
for chapter in chapterList:
pass
def get_book_info(self, response):
pattern = re.compile(r'\d+')
soup = bs4.BeautifulSoup(response.text, 'lxml')
bookItem = BookItem();
bookItem['id'] = pattern.search(soup.find('div',id="info").find('a',{"style":"color:red;"}).attrs['href']).group()
bookItem['cateId'] = response.meta['cateId']
bookItem['name'] = soup.find('div',id="info").h1.get_text()
bookItem['author'] = soup.find('div',id="info").p.get_text().split(u':' )[1]
bookItem['isHot'] = True
bookItem['isSerial'] = True
bookItem['status'] = 1
bookItem['lastUpdate'] = soup.find('div',id="info").find_all('p')[2].get_text().split(u':' )[1]
bookItem['describe'] = soup.find('div',id="intro").get_text().replace(" ", "")
bookItem['bookUrl'] = response.request.url
bookItem['create_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookItem
book_content_list = response.xpath('//div[@id="list"]/dl/dd')
for con_li in book_content_list:
con_url = con_li.xpath('a/attribute::href').extract()[0]
if con_url.startswith('/book/'):
yield scrapy.Request(url='https://www.qu.la' + con_url, callback=self.get_book_content,
meta={'url': 'https://www.qu.la' + con_url, "bookId": bookItem["id"]})
def get_book_content(self,response):
pattern = re.compile(r'^(https://www.qu.la/.*?)(\d+)(.html)$')
soup = bs4.BeautifulSoup(response.text, 'lxml')
bookContentItem = BookContentItem();
bookContentItem['id'] = pattern.search(response.meta['url']).group(2)
bookContentItem['bookId'] = response.meta['bookId']
bookContentItem['title'] = soup.find('div',attrs={"class":"bookname"}).h1.get_text()
bookContentItem['content'] = soup.find('div',id="content").get_text()
bookContentItem['linkUrl'] = response.meta['url']
bookContentItem['createDate'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookContentItem
chenrunhu/wssc_scrapy | scrapy_wssc/spiders/mobile_spider.py | mobile_spider.py | py | 4,566 | python | en | code | 0 | github-code | 6
19412768499 |
def get_serialized_rows_by_id(cls, validated_params, rqst_errors):
rqst_staff_id = validated_params['id']
if rqst_staff_id != 'all':
list_of_ids = validated_params['id_list']
else:
list_of_ids = None
navigator_qset = filter_navigator_qset_by_id(cls.objects.all(), rqst_staff_id, list_of_ids)
navigator_qset = filter_db_objects_by_secondary_params(navigator_qset, validated_params)
response_list = create_response_list_from_db_objects(navigator_qset)
def check_response_data_for_requested_data():
if not response_list:
rqst_errors.append("No navigator instances in db for given ids")
else:
if list_of_ids:
for db_id in list_of_ids:
tuple_of_bools_if_id_in_data = (instance_data['id'] == db_id for instance_data in response_list)
if not any(tuple_of_bools_if_id_in_data):
rqst_errors.append('Navigator instance with id: {} not found in database'.format(db_id))
check_response_data_for_requested_data()
return response_list
def get_serialized_rows_by_f_and_l_name(cls, validated_params, rqst_errors):
rqst_first_name = validated_params['first_name']
rqst_last_name = validated_params['last_name']
navigator_qset = filter_navigator_objs_by_f_and_l_name(cls.objects.all(), rqst_first_name, rqst_last_name)
navigator_qset = filter_db_objects_by_secondary_params(navigator_qset, validated_params)
response_list = create_response_list_from_db_objects(navigator_qset)
def check_response_data_for_requested_data():
if not response_list:
rqst_errors.append("No navigator instances in db for given first and last name")
check_response_data_for_requested_data()
response_list = [response_list]
return response_list
def get_serialized_rows_by_email(cls, validated_params, rqst_errors):
list_of_emails = validated_params['email_list']
response_list = []
for email in list_of_emails:
filtered_navigator_qset = filter_navigator_objs_by_email(cls.objects.all(), email)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
response_list_component = create_response_list_from_db_objects(filtered_navigator_qset)
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instance with email: {} not found in database'.format(email))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def get_serialized_rows_by_first_name(cls, validated_params, rqst_errors):
list_of_first_names = validated_params['first_name_list']
response_list = []
for first_name in list_of_first_names:
filtered_navigator_qset = filter_navigator_objs_by_first_name(cls.objects.all(), first_name)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
response_list_component = create_response_list_from_db_objects(filtered_navigator_qset)
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instance with first name: {} not found in database'.format(first_name))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def get_serialized_rows_by_last_name(cls, validated_params, rqst_errors):
list_of_last_names = validated_params['last_name_list']
response_list = []
for last_name in list_of_last_names:
filtered_navigator_qset = filter_navigator_objs_by_last_name(cls.objects.all(), last_name)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
response_list_component = create_response_list_from_db_objects(filtered_navigator_qset)
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instance with last name: {} not found in database'.format(last_name))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def get_serialized_rows_by_county(cls, validated_params, rqst_errors):
list_of_counties = validated_params['county_list']
response_list = []
for county in list_of_counties:
filtered_navigator_qset = filter_navigator_objs_by_county(cls.objects.all(), county)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
response_list_component = create_response_list_from_db_objects(filtered_navigator_qset)
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instances with a default county of: {} not found in database'.format(county))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def get_serialized_rows_by_region(cls, validated_params, rqst_errors):
list_of_regions = validated_params['region_list']
response_list = []
counties_mapped_to_regions = cls.REGIONS
for region in list_of_regions:
if region not in counties_mapped_to_regions:
rqst_errors.append("{} is not a valid region stored in the db.".format(region))
else:
counties_in_this_region = counties_mapped_to_regions[region]
response_list_component = []
for county in counties_in_this_region:
def add_staff_data_from_county_to_response_component():
filtered_navigator_qset = filter_navigator_objs_by_county(cls.objects.all(), county)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
staff_data_for_this_county = create_response_list_from_db_objects(filtered_navigator_qset)
for staff_data in staff_data_for_this_county:
response_list_component.append(staff_data)
add_staff_data_from_county_to_response_component()
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instances with a default county in region: {} not found in database'.format(region))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def get_serialized_rows_by_mpn(cls, validated_params, rqst_errors):
list_of_mpns = validated_params['mpn_list']
response_list = []
for mpn in list_of_mpns:
filtered_navigator_qset = filter_navigator_objs_by_mpn(cls.objects.all(), mpn)
filtered_navigator_qset = filter_db_objects_by_secondary_params(filtered_navigator_qset, validated_params)
response_list_component = create_response_list_from_db_objects(filtered_navigator_qset)
def check_response_component_for_requested_data():
if not response_list_component:
rqst_errors.append('Navigator instance with MPN: {} not found in database'.format(mpn))
check_response_component_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list
def create_response_list_from_db_objects(db_objects):
return_list = []
for db_instance in db_objects:
return_list.append(db_instance.return_values_dict())
return return_list
def filter_db_objects_by_secondary_params(db_objects, validated_get_params):
if 'approved_cm_client_id_list' in validated_get_params:
list_of_cm_client_ids = validated_get_params['approved_cm_client_id_list']
db_objects = db_objects.filter(approved_cm_clients__in=list_of_cm_client_ids)
return db_objects
def prefetch_related_rows(db_queryset):
db_queryset = db_queryset.select_related(
'address',
'address__country',
)
db_queryset = db_queryset.prefetch_related(
"approved_clients_for_case_management",
'picconsumer_set',
'base_locations',
'base_locations__address',
'base_locations__address__country',
'credentialsmodel_set',
"healthcare_locations_worked",
"healthcare_service_expertises",
"insurance_carrier_specialties",
"resume_set",
"resume_set__education_set",
"resume_set__job_set",
)
return db_queryset
def filter_navigator_qset_by_id(db_queryset, rqst_id, list_of_ids):
db_queryset = prefetch_related_rows(db_queryset)
if isinstance(rqst_id, str) and rqst_id.lower() == "all":
db_queryset = db_queryset.order_by("id")
else:
db_queryset = db_queryset.filter(id__in=list_of_ids).order_by("id")
return db_queryset
def filter_navigator_objs_by_f_and_l_name(db_queryset, rqst_first_name, rqst_last_name):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(first_name__iexact=rqst_first_name, last_name__iexact=rqst_last_name).order_by(
"last_name", "first_name")
return db_queryset
def filter_navigator_objs_by_first_name(db_queryset, rqst_first_name):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(first_name__iexact=rqst_first_name).order_by("first_name")
return db_queryset
def filter_navigator_objs_by_last_name(db_queryset, rqst_last_name):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(last_name__iexact=rqst_last_name).order_by("last_name")
return db_queryset
def filter_navigator_objs_by_email(db_queryset, rqst_email):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(email__iexact=rqst_email).order_by("email")
return db_queryset
def filter_navigator_objs_by_county(db_queryset, rqst_county):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(county__iexact=rqst_county).order_by("county")
return db_queryset
def filter_navigator_objs_by_mpn(db_queryset, rqst_mpn):
db_queryset = prefetch_related_rows(db_queryset)
db_queryset = db_queryset.filter(mpn__iexact=rqst_mpn).order_by("mpn")
return db_queryset
bbcawodu/careadvisors-backend | picmodels/models/care_advisors/navigator_models/services/read.py | read.py | py | 11,586 | python | en | code | 0 | github-code | 6
44730303411 |
import os
import discord
from discord.utils import get
from discord.ext import commands, tasks
from dotenv import load_dotenv
import random
import re
import time
import asyncio  # needed for the asyncio.TimeoutError handled in the movie command below
import requests
load_dotenv()
OMDB_KEY = os.getenv('OMDB_KEY')
STREAMING_KEY = os.getenv('STREAMING_KEY')
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
image_types = ["png", "jpeg", "gif", "jpg", "mp4", "mov"]
client = discord.Client(command_prefix = 'sponge', intents = discord.Intents.all())
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix = '!', intents = intents)
@bot.event
async def on_ready():
print(f'logged in as {bot.user.name}')
@bot.command(name='shit')
async def random_image(ctx, source_channel_id = 815464674404990988, source_server_id = 735280881664655410, target_server_id = 718995069847470141, target_channel_id = 718995070316970046):
try:
source_server = discord.utils.get(bot.guilds, id = source_server_id)
target_server = discord.utils.get(bot.guilds, id = target_server_id)
source_channel = discord.utils.get(source_server.channels, id = source_channel_id)
target_channel = discord.utils.get(target_server.channels, id = target_channel_id)
messages = []
async for message in source_channel.history(limit = 500):
messages.append(message)
image_messages = [message for message in messages if message.attachments]
if image_messages:
random_message = random.choice(image_messages)
attachment = random_message.attachments[0]
image_url = attachment.url
sender = random_message.author
await target_channel.send(image_url)
await target_channel.send(f'courtesy of {sender}')
else:
await ctx.send('no image found')
except discord.NotFound:
await ctx.send('source image not found')
@bot.command(name = 'problem')
async def ask(ctx):
await ctx.send('https://tenor.com/view/whattheproblemis-martin-lawrence-nationalsecurity-gif-27064298')
@bot.command(name = 'movie')
async def ask(ctx):
await ctx.send('What movie would you like to watch?')
def check(message):
return message.author == ctx.author and message.channel == ctx.channel
try:
user_response = await bot.wait_for('message', check=check, timeout=10.0)
await ctx.send(f'you want to watch: {user_response.content}')
response = requests.get(f'http://www.omdbapi.com/?t={user_response.content}&apikey={OMDB_KEY}')
imdb_id = response.json()['imdbID']
imdb_title = response.json()['Title']
imdb_plot = response.json()['Plot']
url = "https://streaming-availability.p.rapidapi.com/v2/get/basic"
querystring = {"country":"us",f"imdb_id":f"{imdb_id}","output_language":"en"}
headers = {
"X-RapidAPI-Key": "e145409a39mshf509ba14a206131p1acb3ejsnaeb0f27c7eb9",
"X-RapidAPI-Host": "streaming-availability.p.rapidapi.com"
}
response = requests.get(url, headers=headers, params=querystring)
example_json = response.json()
streaming_info = example_json['result']['streamingInfo']['us']
for i in streaming_info:
quality = streaming_info[i][0]['quality']
type_of_stream = streaming_info[i][0]['type']
link = streaming_info[i][0]['link']
await ctx.send(f'you can {type_of_stream} {imdb_title} on {i} in {quality} \n here is your link: {link}' )
await ctx.send(f' \n \n the plot is: {imdb_plot}')
except asyncio.TimeoutError:
await ctx.send('timeout. you did not send a response quick enough')
bot.run(TOKEN)
# @client.event
# async def on_ready():
# guild = discord.utils.get(client.guilds, name=GUILD)
# print(f'{client.user} has connected to the following guild:\n'
# f'{guild.name}(id: {guild.id})')
# @client.event
# async def on_message(message):
# # if the message is by the bot break the function - this stops endless loops
# if message.author == client.user:
# return
# # sends spongebob-text message
# if len(message.content) > 35:
# response = [x for x in message.content]
# for i in range(len(message.content)):
# upper_lower = random.randint(0,1)
# if upper_lower == 1:
# response[i] = message.content[i]
# elif upper_lower == 0:
# response[i] = message.content[i].upper()
# await message.channel.send(''.join(response))
# # looks for /d and number to roll random number generator
# if '/d' in message.content:
# num = re.search('(?<=\/d).[0-9]+', message.content)
# if num.group(0).isnumeric():
# string = f'{str(random.randrange(1,int(num.group(0))+1))}'
# await message.channel.send(string)
# else:
# await message.channel.send('thats not a number, try again')
# # responds with emoji
# if 'wz' in message.content.lower() or 'warzone' in message.content.lower() or 'cod' in message.content.lower() or 'call of duty' in message.content.lower():
# emoji = client.get_emoji(955552719379251300)
# await message.add_reaction(emoji)
# elif 'shot' in message.content.lower():
# emoji = client.get_emoji(951262317482479667)
# await message.add_reaction(emoji)
# for attachment in message.attachments:
# if any(attachment.filename.lower().endswith(image) for image in image_types):
# await attachment.save(f'attachments/{attachment.filename}')
# client.run(TOKEN)
aburpee/spongebob-text | app.py | app.py | py | 5,748 | python | en | code | 0 | github-code | 6
3281849942 |
import torch
import wandb
from torch import nn
import torchvision.utils as vutils
def get_time_emb(dim, time):
pos = torch.arange(0, time, dtype=torch.float)
omega = torch.arange(dim // 2, dtype=torch.float)
omega /= dim / 2.0
omega = 1.0 / 10000 ** omega
out = torch.einsum("m,d->md", pos, omega)
emb_sin = torch.sin(out)
emb_cos = torch.cos(out)
emb = torch.concatenate([emb_sin, emb_cos], dim=1)
return emb
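# Editor's note (a sketch, not part of the original repo): the function above builds
# standard sinusoidal time embeddings, emb[t] = [sin(t*w_0), ..., sin(t*w_{d/2-1}),
# cos(t*w_0), ..., cos(t*w_{d/2-1})] with w_k = 1 / 10000**(2k / dim), one row per
# timestep t in [0, time).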
class SlotBert(nn.Module):
def __init__(self, slate, num_actions, time,
n_heads=4, dim_feedforward=512, num_layers=4, detach=False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.detach = detach
self.slate = slate
self.action_emb = nn.Embedding(num_actions, slate.slot_size)
self.rew_emb = nn.Linear(1, slate.slot_size)
self.modality_mask_emb = nn.Embedding(4, slate.slot_size)
self.time_emb = get_time_emb(slate.slot_size, time)
bert_layer = nn.TransformerEncoderLayer(slate.slot_size, n_heads, dim_feedforward, batch_first=True)
self.bert = nn.TransformerEncoder(bert_layer, num_layers)
self.act_ff = nn.Sequential(nn.Linear(slate.slot_size, slate.slot_size*2),
nn.GELU(),
nn.Linear(slate.slot_size*2, num_actions),
)
self.act_loss = torch.nn.CrossEntropyLoss()
@property
def device(self):
return next(self.parameters()).device
def embed_sep(self, obses, actions, rew, return_add=False):
target_sizes = list(obses.shape)[:2]
obses = torch.flatten(obses, 0, 1)
recon, ce, mse, attns, obses = self.slate(obses)
if self.detach:
obses = obses.detach()
obses_ret = torch.unflatten(obses, 0, target_sizes)
if return_add:
return obses_ret, self.action_emb(actions), self.rew_emb(rew.unsqueeze(-1)), mse, ce,\
torch.unflatten(recon, 0, target_sizes), torch.unflatten(attns, 0, target_sizes)
return obses_ret, self.action_emb(actions), self.rew_emb(rew.unsqueeze(-1)), mse, ce
def mask_sep(self, obses, actions, rew):
# gamma below is a per-sample keep probability: positions whose Bernoulli draw is 1
# keep their token, while positions drawn as 0 are replaced by the mask embedding
gamma = torch.rand((obses.shape[0]), device=self.device)
gamma = gamma.unsqueeze(-1)
mask_probs_obses = torch.ones(obses.shape[:-1], device=self.device) * gamma.unsqueeze(-1)
mask_probs_actions_rew = torch.ones(actions.shape[:-1], device=self.device) * gamma
mask_obses = torch.bernoulli(mask_probs_obses).long()
mask_actions = torch.bernoulli(mask_probs_actions_rew).long()
mask_rew = torch.bernoulli(mask_probs_actions_rew).long()
return (
(obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(rew * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
), (mask_obses, mask_actions, mask_rew)
def add_modality_sep(self, obses, actions, rew):
mod_obses = self.modality_mask_emb(torch.ones(obses.shape[:-1], dtype=torch.long, device=self.device))
mod_actions = self.modality_mask_emb(torch.ones(actions.shape[:-1], dtype=torch.long, device=self.device) * 2)
mod_rew = self.modality_mask_emb(torch.ones(rew.shape[:-1], dtype=torch.long, device=self.device) * 3)
return obses + mod_obses, actions + mod_actions, rew + mod_rew
def add_time_sep(self, obses, actions, rew):
actions_rew_time = self.time_emb.unsqueeze(0).to(self.device)
obses_time = actions_rew_time.unsqueeze(-2)
return obses + obses_time, actions + actions_rew_time, rew + actions_rew_time
def concat_all(self, obses, actions, rew):
actions_new = actions.unsqueeze(2)
rew_new = rew.unsqueeze(2)
stack = torch.cat([obses, actions_new, rew_new], dim=2)
stack = torch.flatten(stack, start_dim=1, end_dim=2)
return stack
def sep_to_seq(self, obses, actions, rewards):
obses, actions, rewards = self.add_modality_sep(obses, actions, rewards)
obses, actions, rewards = self.add_time_sep(obses, actions, rewards)
return self.concat_all(obses, actions, rewards)
def pass_to_bert(self, seq):
return self.bert(seq)
def forward(self, obses, actions, rewards):
t_obses, t_actions, t_rewards, mse, ce = self.embed_sep(obses, actions, rewards)
(m_obses, m_actions, m_rewards), (bm_o, bm_a, bm_r) = self.mask_sep(t_obses, t_actions, t_rewards)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
masks = 1 - self.concat_all(bm_o, bm_a, bm_r) # mask = 0 should be included in loss
new_tokens = self.pass_to_bert(masked_tokens)
bert_mse = torch.mean((new_tokens - masked_tokens) ** 2 * masks.unsqueeze(-1))
# if self.detach:
# new_tokens = new_tokens.detach()
# TODO: check loss is correct
new_ttokens = new_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
actions_time = self.time_emb.unsqueeze(0).to(self.device)
mod_actions = self.modality_mask_emb(torch.ones(new_ttokens.shape[:-1],
dtype=torch.long, device=self.device) * 2)
new_ttokens = new_ttokens - actions_time - mod_actions
if self.detach:
new_ttokens = new_ttokens.detach()
new_actions = self.act_ff(new_ttokens)
loss = self.act_loss(new_actions.flatten(0, 1), actions.flatten(0, 1))
# END OF TD
return new_tokens, (mse, ce, bert_mse, loss)
@torch.no_grad()
def inv_din_inference(self, obses, actions, rewards):
losses = {}
# we should mask all rew
# we should mask last 2 actions
# we should not mask obses
# meaningful actions: last 2 obses are different
meaningful = torch.abs(obses[:, -1] - obses[:, -2])
meaningful = torch.max(torch.flatten(meaningful, 1), 1).values
meaningful = torch.eq(meaningful, torch.zeros_like(meaningful))
t_obses, t_actions, t_rewards, _, _, = self.embed_sep(obses, actions, rewards)
mask_obses = torch.ones(t_obses.shape[:-1], device=self.device).long()
mask_rew = torch.zeros(t_rewards.shape[:-1], device=self.device).long()
mask_actions = torch.ones(t_actions.shape[:-1], device=self.device).long()
mask_actions[:, -1] = 0
mask_actions[:, -2] = 0
m_obses, m_actions, m_rewards = (
(t_obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(t_actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(t_rewards * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
new_tokens = self.pass_to_bert(masked_tokens)
new_ttokens = new_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
actions_time = self.time_emb.unsqueeze(0).to(self.device)
mod_actions = self.modality_mask_emb(torch.ones(new_ttokens.shape[:-1],
dtype=torch.long, device=self.device) * 2)
new_ttokens = new_ttokens - actions_time - mod_actions
old_ttokens = masked_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
old_ttokens = old_ttokens - actions_time - mod_actions
new_actions = self.act_ff(new_ttokens)
new_action_emb = new_ttokens[:, -2]
old_action_emb = old_ttokens[:, -2]
losses['mse'] = torch.mean((new_action_emb - old_action_emb) ** 2)
losses['meaningful mse'] = torch.mean(((new_action_emb - old_action_emb)[meaningful]) ** 2)
distance = torch.norm(new_action_emb.unsqueeze(1) - self.action_emb.weight.data.unsqueeze(0), dim=2)
nearest = torch.argmin(distance, dim=1)
new_action = new_actions[:, -2]
old_action = actions[:, -2]
losses['cross entropy'] = self.act_loss(new_action, old_action)
losses['meaningful cross entropy'] = self.act_loss(new_action[meaningful],
old_action[meaningful])
new_action_max = torch.max(new_action, dim=1).indices
losses['accuracy'] = torch.sum(torch.eq(old_action, new_action_max)) / (old_action.shape[0])
losses['meaningful accuracy'] = torch.sum(torch.eq(old_action[meaningful],
new_action_max[meaningful])) / (
old_action[meaningful].shape[0])
losses['nearest accuracy'] = torch.sum(torch.eq(old_action, nearest)) / (old_action.shape[0])
losses['nearest meaningful accuracy'] = torch.sum(torch.eq(old_action[meaningful],
nearest[meaningful])) / (
old_action[meaningful].shape[0])
return losses
@torch.no_grad()
def forw_din_inference(self, obses, actions, rewards):
# we should mask all rew
# we should not mask actions
# we should mask last obs
losses = {}
t_obses, t_actions, t_rewards, _, _, recon, attns = self.embed_sep(obses, actions, rewards, return_add=True)
mask_obses = torch.ones(t_obses.shape[:-1], device=self.device).long()
mask_rew = torch.zeros(t_rewards.shape[:-1], device=self.device).long()
mask_actions = torch.ones(t_actions.shape[:-1], device=self.device).long()
mask_obses[:, -1] = 0
m_obses, m_actions, m_rewards = (
(t_obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(t_actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(t_rewards * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
new_tokens = self.pass_to_bert(masked_tokens)
new_tokens = new_tokens.unflatten(1, (-1, self.slate.num_slots + 2))
old_tokens = masked_tokens.unflatten(1, (-1, self.slate.num_slots + 2))
new_slots = new_tokens[:, -1, :-2]
old_slots = old_tokens[:, -1, :-2]
losses['mse slots'] = torch.mean((new_slots - old_slots) ** 2)
new_slots_deemb = new_slots - self.modality_mask_emb(torch.ones(new_slots.shape[:-1],
dtype=torch.long,
device=self.device))
new_slots_deemb = new_slots_deemb - self.time_emb.to(self.device).unsqueeze(-2)[-1:]
old_slots_deemb = old_slots - self.modality_mask_emb(torch.ones(old_slots.shape[:-1],
dtype=torch.long,
device=self.device))
old_slots_deemb = old_slots_deemb - self.time_emb.to(self.device).unsqueeze(-2)[-1:]
reconstruct = self.slate.reconstruct_slots(new_slots_deemb)
reconstruct_old = self.slate.reconstruct_slots(old_slots_deemb)
losses['mse images slate-bert'] = torch.mean((reconstruct - reconstruct_old) ** 2)
losses['mse images gt-slate'] = torch.mean((obses[:, -1] - reconstruct_old) ** 2)
losses['mse images gt-bert'] = torch.mean((reconstruct - obses[:, -1]) ** 2)
reconstruct = torch.cat([obses[:16, -1], recon[:16, -1], reconstruct[:16], reconstruct_old[:16]], dim=0)
grid = vutils.make_grid(reconstruct, nrow=16, pad_value=0.2)[:, 2:-2, 2:-2]
attns_grid = vutils.make_grid(torch.flatten(attns[:16, -1], 0, 1), nrow=16, pad_value=0.2)[:, 2:-2, 2:-2]
losses['visualisation'] = wandb.Image(grid)
losses['attns'] = wandb.Image(attns_grid)
return losses
Shimanogov/bert-slots | model.py | model.py | py | 12,552 | python | en | code | 0 | github-code | 6
12864757051 |
import streamlit as st
import pandas as pd
import numpy as np
import re
import json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import math
import warnings
warnings.filterwarnings('ignore')
from PIL import Image
# Page setup
st.set_page_config(page_title = "Python Tweets Search Engine", page_icon = "🐍", layout = "wide")
st.title("Python Tweets Search Engine")
df = pd.read_csv('preprocessed_data.csv').fillna('')
inverted_index = json.load(open("inverted_index.json"))
# Define a function to tokenize and clean the text
def clean_text(text):
text = re.sub(r"http\S+", "", text) # Remove URLs
text = re.sub(r'[^\w\s]', '', text) # Remove punctuation
text = text.lower() # Convert text to lowercase
return text.split()
# ------------------------------------------------------------------------------------------------------------
# Define the Boolean model function
def boolean_model(query):
#corpus = pd.read_csv('preprocessed_data.csv')['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')
# Pre-process the query
query = clean_text(query)
# Split query into terms
if not query:
return []
terms = query
# Find matching documents for each term
results = []
#univ_set = set([x for x in range(len(corpus_raw))])
for i, term in enumerate(terms):
if term in inverted_index:
if terms[i-1] != 'not':
results.append(inverted_index[term])
else:
#results.append(univ_set.difference(set(inverted_index[term])))
pass
else:
results.append(set())
#print(results)
# Combine the sets using Boolean operators
combined_results = set()
for i, term_result in enumerate(results):
term_result = set(term_result) # convert list to set
if i == 0:
combined_results = term_result
else:
if terms[i-1] == 'and':
combined_results = combined_results.intersection(term_result)
elif terms[i-1] == 'or':
combined_results = combined_results.union(term_result)
# Get the documents matching all terms
# matching_docs = [corpus[i] for i in combined_results]
df = corpus_raw
return df[df.index.isin(combined_results)]
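# Editor's illustrative sketch (hypothetical terms, not guaranteed to be in this
# dataset's vocabulary): for the query "election and fraud", the postings lists
# inverted_index["election"] and inverted_index["fraud"] are intersected, while
# "election or fraud" takes their union; the matching rows of raw_data.csv are
# then returned as a DataFrame.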
# ------------------------------------------------------------------------------------------------------------
# Define a function to handle wildcard queries
def handle_wildcard_query(query):
pattern = query.replace('*', '.*')
regex = re.compile(pattern)
matching_terms = [term for term in inverted_index.keys() if regex.match(term)]
doc_ids = set([doc_id for term in matching_terms for doc_id in inverted_index[term]])
return doc_ids
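# Editor's example (a sketch with a hypothetical term): the query "covid*" becomes
# the regex "covid.*", which is matched against every indexed term, so terms like
# "covid" or "covid19" contribute their posting lists to the returned doc_ids set.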
# ------------------------------------------------------------------------------------------------------------
# Define a function to handle phrase queries
def handle_phrase_query(query):
query = re.sub(r"http\S+", "", query) # Remove URLs
query = re.sub(r'[^\w\s]', '', query) # Remove punctuation
query_terms = query.lower().split()
phrase_docs = []
for i in range(len(df)):
doc = df.iloc[i]
doc_text = doc['content']
for pos in range(len(doc_text.split())):
if doc_text.split()[pos] == query_terms[0]:
match = True
for j in range(1, len(query_terms)):
if pos+j >= len(doc_text.split()):
match = False
break
next_term = doc_text.split()[pos+j]
if not next_term == query_terms[j]:
match = False
break
if match:
phrase_docs.append(i)
break
return phrase_docs
# ------------------------------------------------------------------------------------------------------------
# Define a function to calculate precision and recall
def calc_precision_recall(relevant_docs, retrieved_docs):
tp = len(set(relevant_docs) & set(retrieved_docs))
fp = len(retrieved_docs) - tp
fn = len(relevant_docs) - tp
precision = tp / (tp + fp) if tp + fp > 0 else 0
recall = tp / (tp + fn) if tp + fn > 0 else 0
return precision, recall
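# Editor's worked example (not part of the original app): with relevant_docs =
# [1, 2, 3] and retrieved_docs = [2, 3, 4] the overlap is 2 documents, so
# tp = 2, fp = 1, fn = 1, giving precision = 2/3 and recall = 2/3:
#   calc_precision_recall([1, 2, 3], [2, 3, 4])  # -> (0.666..., 0.666...)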
# ------------------------------------------------------------------------------------------------------------
# Example usage
def query_app(wq, pq):
wildcard_query = wq
phrase_query = pq
wildcard_doc_ids = handle_wildcard_query(wildcard_query)
phrase_doc_ids = handle_phrase_query(phrase_query)
print(f'Wild card query: {wildcard_query}, matching doc ids: {wildcard_doc_ids}')
print(f'Phrase query: {phrase_query}, matching doc ids: {phrase_doc_ids}')
# ------------------------------------------------------------------------------------------------------------
def query_pr_app(wq, pq, relevant_docs):
wildcard_query = wq
phrase_query = pq
wildcard_doc_ids = handle_wildcard_query(wildcard_query)
phrase_doc_ids = handle_phrase_query(phrase_query)
print(f'Wild card query: {wildcard_query}, matching doc ids: {wildcard_doc_ids}')
print(f'Phrase query: {phrase_query}, matching doc ids: {phrase_doc_ids}')
print('---')
print('Evaluation:')
print(f'Number of relevant documents: {len(relevant_docs)}')
wildcard_precision, wildcard_recall = calc_precision_recall(relevant_docs, wildcard_doc_ids)
print(f'Wild card query precision: {wildcard_precision}, recall: {wildcard_recall}')
phrase_precision, phrase_recall = calc_precision_recall(relevant_docs, phrase_doc_ids)
print(f'Phrase query precision: {phrase_precision}, recall: {phrase_recall}')
# ------------------------------------------------------------------------------------------------------------
def retrieve_using_cosine_similarity(query, num_docs = 5):
# Tokenize and clean the query
query_tokens = clean_text(query)
corpus = df['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')['content'].tolist()
# Retrieve documents containing at least one query term
candidate_doc_ids = set()
for query_token in query_tokens:
if query_token in inverted_index:
candidate_doc_ids.update(inverted_index[query_token])
# Calculate the cosine similarity between the query and candidate documents
candidate_docs = [corpus[doc_id] for doc_id in candidate_doc_ids]
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(candidate_docs)
query_vector = vectorizer.transform([query])
cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()
# Sort the candidate documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
return [corpus.index(candidate_docs[index]) for index in document_indices]
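# Editor's usage note (sketch; the query string is purely illustrative):
# retrieve_using_cosine_similarity("sports news", num_docs=3) returns the row
# indices (into the preprocessed corpus) of the three candidate tweets whose
# TF-IDF vectors are closest to the query by cosine similarity.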
# ------------------------------------------------------------------------------------------------------------
def log_likelihood(query, num_docs):
corpus = df['content'].tolist()
query = re.sub(r"http\S+", "", query) # Remove URLs
query = re.sub(r'[^\w\s]', '', query) # Remove punctuation
query_tokens = query.lower().split()
query_likelihood = {}
for token in query_tokens:
if token in query_likelihood:
query_likelihood[token] += 1
else:
query_likelihood[token] = 1
query_length = sum(query_likelihood.values())
for token in query_likelihood:
query_likelihood[token] = query_likelihood[token] / query_length
# Retrieve the documents that contain any of the query tokens
retrieved_docs = set()
for token in query_tokens:
if token in inverted_index:
retrieved_docs.update(inverted_index[token])
# Compute the likelihood of each retrieved document
doc_likelihoods = {}
for doc_id in retrieved_docs:
doc_tokens = corpus[doc_id].lower().split()
doc_length = len(doc_tokens)
likelihood = 0
for token in query_likelihood:
count = doc_tokens.count(token)
token_likelihood = count / doc_length if count > 0 else 1 / (doc_length + 1)
likelihood += math.log(token_likelihood) * query_likelihood[token]
doc_likelihoods[doc_id] = likelihood
# Rank the retrieved documents by their likelihood
sorted_docs = sorted(doc_likelihoods.items(), key=lambda x: x[1], reverse=True)
# Get the top N documents
# Return the ids of the top N documents (already ranked by likelihood)
return [doc_id for doc_id, likelihood in sorted_docs[:num_docs]]
# ------------------------------------------------------------------------------------------------------------
# Define a function to retrieve documents using cosine similarity with relevance feedback
def retrieve_using_cosine_similarity_with_feedback(query, rel_list, num_docs = 5, alpha = 1, beta = 0.75, gamma = 0.15):
# Transform the query using the vectorizer
corpus = df['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')['content'].tolist()
# Create a TF-IDF vectorizer and transform the corpus
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(corpus)
query_vector = vectorizer.transform([query])
# Calculate the cosine similarity between the query and all documents in the corpus
cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()
# Sort the documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
top_documents = [(corpus_raw[index], cosine_similarities[index]) for index in document_indices]
# Print the top documents
print(document_indices)
print(f"Showing top {num_docs} documents that are most similar to the query '{query}':\n")
for i, (text, cosine_sim) in enumerate(top_documents):
print(f"Rank {i+1} (Cosine Similarity: {cosine_sim:.4f}):")
print(text)
print("Reason: The document has a high cosine similarity score with the query.\n")
# Get feedback from the user on the relevance of the search results
relevant_doc_indices = []
non_relevant_doc_indices = []
print(rel_list, type(rel_list))
for i in range(len(top_documents)):
if(str(i) in rel_list):
relevant_doc_indices.append(document_indices[i])
else:
non_relevant_doc_indices.append(document_indices[i])
# Calculate the new query vector using the Rocchio algorithm
relevant_doc_vectors = tfidf_matrix[relevant_doc_indices]
non_relevant_doc_vectors = tfidf_matrix[non_relevant_doc_indices]
new_query_vector = alpha * query_vector + beta * relevant_doc_vectors.mean(axis=0) - gamma * non_relevant_doc_vectors.mean(axis=0)
# Calculate the cosine similarity between the new query vector and all documents in the corpus
cosine_similarities = cosine_similarity(np.asarray(new_query_vector), tfidf_matrix).flatten()
# Sort the documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
top_documents = [(corpus_raw[index], cosine_similarities[index]) for index in document_indices]
print(document_indices, top_documents)
print(type(document_indices), type(top_documents))
# Print the reranked top documents
print(f"\nShowing top {num_docs} reranked documents that are most similar to the query '{query}':\n")
for i, (text, cosine_sim) in enumerate(top_documents):
print(f"Rank {i+1} (Cosine Similarity: {cosine_sim:.4f}):")
print(text)
print("Reason: The document has a high cosine similarity score with the reranked query.\n")
return list(document_indices)
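# Editor's note (sketch): the reranking above follows the standard Rocchio update,
#   q_new = alpha * q + beta * mean(relevant doc vectors) - gamma * mean(non-relevant doc vectors)
# with alpha=1, beta=0.75, gamma=0.15 by default, after which plain cosine similarity
# against the whole corpus is recomputed with the updated query vector.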
# ------------------------------------------------------------------------------------------------------------
# Test the Boolean model
option = st.selectbox(
'Type of query :',
('Boolean', 'Phrase', 'Wildcard', 'Cosine Similarity' , 'Relevance'))
N_cards_per_row = 3
max_results = 24
image = Image.open("icon.png")
resized_image = image.resize((300, 300))
st.sidebar.image(resized_image, width = 250)
for _ in range(5):
st.sidebar.text("\n")
st.sidebar.text("This app is to serve as a front-end \nfor the tweets dataset search \nengine system implemented for\nAIRIW Assignment 1 in Python.")
df1 = pd.read_csv('raw_data.csv')
st.info("Search tweets by Boolean, Phrase, Wildcard, Cosine , Likelihood or Relevant")
text_search = st.text_input("Enter your query :")
if st.button('Go'):
st.success("Searching... Your query is being processed !!!")
if(option == 'Boolean'):
df_search = boolean_model(text_search)
elif(option == 'Phrase'):
df_search = df1[df1.index.isin(handle_phrase_query(text_search))]
elif(option == 'Wildcard'):
df_search = df1[df1.index.isin(handle_wildcard_query(text_search))]
elif(option == 'Cosine Similarity'):
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity(text_search, max_results))]
# elif(option == 'Log Likelihood'):
# df_search = df1[df1.index.isin(log_likelihood(text_search, max_results))]
elif(option == 'Relevance'):
rel_lis = st.text_input("Enter relevant docs as a list")
if rel_lis:
st.write('Feedback submitted! New results are: ')
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity_with_feedback(text_search, rel_lis.split(','), max_results))]
else:
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity(text_search, max_results))]
df_search = df_search[:max_results]
if text_search:
with st.expander("Click to see dataframe view"):
st.write(df_search)
for n_row, row in df_search.reset_index().iterrows():
i = n_row % N_cards_per_row
if i == 0:
st.write("---")
cols = st.columns(N_cards_per_row, gap = "large")
# draw the card
with cols[n_row % N_cards_per_row]:
st.caption(f"(Result No.: {n_row}) Tweet:")
st.markdown(f"**{row['content'].strip()}**")
st.markdown(f"*{row['publish_date'].strip()}*")
smsraj2001/MINI-SEARCH-ENGINE | app.py | app.py | py | 14,858 | python | en | code | 2 | github-code | 6
17195304707 |
# Threading is the better fit when the work is dominated by network I/O waits
#from threading import Thread
# Multiprocessing is used for CPU-intensive work (no network I/O to hide behind)
from multiprocessing import Process, Queue
from time import time
def check_value_in_list(x, j, num_of_processes, queue):
max_num_to_check = 10**8
lower_bnd = int((j * max_num_to_check)/num_of_processes)
upper_bnd = int(((j + 1) * max_num_to_check)/num_of_processes)
num_of_hits = 0
for i in range(lower_bnd, upper_bnd):
if i in x:
num_of_hits += 1
queue.put((lower_bnd, upper_bnd, num_of_hits))
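# Editor's note (illustrative): each worker checks an equal slice of [0, 10**8);
# e.g. with num_of_processes = 4, the process with j = 1 scans the half-open range
# [25_000_000, 50_000_000) and reports how many of those values appear in x.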
#num_threads = 4
def run():
comparison_list = [1,2,3]
num_processes = 4
queue = Queue()
processes = []
for i in range(num_processes):
t = Process(target=check_value_in_list, args=(comparison_list, i, num_processes, queue))
processes.append(t)
for t in processes:
t.start()
for t in processes:
t.join()
queue.put("DONE")
while True:
v = queue.get()
if v == "DONE":
break
lower, upper, num_of_hits = v
print("Between", lower, "and", upper, "we have", num_of_hits, "values in the list")
if __name__ == "__main__":
start_time = time()
run()
print("Script time:", time() - start_time, " seconds")
ganton000/Concurrency | multiprocessing-tutorial/main.py | main.py | py | 1,200 | python | en | code | 0 | github-code | 6
43633237973 |
from __future__ import absolute_import
#typing
import numpy
#overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
class SimpleTagger(Model):
u"""
This ``SimpleTagger`` simply encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then
predicts a tag for each token in the sequence.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab ,
text_field_embedder ,
encoder ,
initializer = InitializerApplicator(),
regularizer = None) :
super(SimpleTagger, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size(u"labels")
self.encoder = encoder
self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
self.num_classes))
check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
u"text field embedding dim", u"encoder input dim")
self.metrics = {
u"accuracy": CategoricalAccuracy(),
u"accuracy3": CategoricalAccuracy(top_k=3)
}
initializer(self)
#overrides
def forward(self, # type: ignore
tokens ,
tags = None,
metadata = None) :
# pylint: disable=arguments-differ
u"""
Parameters
----------
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
metadata containing the original words in the sentence to be tagged under a 'words' key.
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = embedded_text_input.size()
mask = get_text_field_mask(tokens)
encoded_text = self.encoder(embedded_text_input, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view([batch_size,
sequence_length,
self.num_classes])
output_dict = {u"logits": logits, u"class_probabilities": class_probabilities}
if tags is not None:
loss = sequence_cross_entropy_with_logits(logits, tags, mask)
for metric in list(self.metrics.values()):
metric(logits, tags, mask.float())
output_dict[u"loss"] = loss
if metadata is not None:
output_dict[u"words"] = [x[u"words"] for x in metadata]
return output_dict
#overrides
def decode(self, output_dict ) :
u"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a ``"tags"`` key to the dictionary with the result.
"""
all_predictions = output_dict[u'class_probabilities']
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [self.vocab.get_token_from_index(x, namespace=u"labels")
for x in argmax_indices]
all_tags.append(tags)
output_dict[u'tags'] = all_tags
return output_dict
#overrides
def get_metrics(self, reset = False) :
return dict((metric_name, metric.get_metric(reset)) for metric_name, metric in list(self.metrics.items()))
SimpleTagger = Model.register(u"simple_tagger")(SimpleTagger)
plasticityai/magnitude | pymagnitude/third_party/allennlp/models/simple_tagger.py | simple_tagger.py | py | 6,916 | python | en | code | 1,607 | github-code | 6
40201462407 |
import cv2
import numpy as np
import pandas as pd
import json
from scipy.spatial.distance import cdist
import os
# Get fps of given video
def getFps(path):
vidObj = cv2.VideoCapture(path)
fps = vidObj.get(cv2.CAP_PROP_FPS)
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
vidObj.release()
return int(fps)
# Given the path to a video, its fps and the second of the needed frame, it saves that frame as a jpg (needed for testing and trials)
def saveFrame(path, fps, frame_no):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
frame_count = 0
while success:
success, img = vidObj.read()
if frame_count == frame_no:
cv2.imwrite("frame"+str(frame_count)+".jpg",img)
break
# Catch the frames per second
if count % fps == 0:
frame_count = frame_count + 1
count += 1
# Path: Path to the video to capture descriptors
# Fps: Fps of the video
# Interval: Array with two elements that indicate the start and end time of video to capture ([0,420] for first 7 min)
# No_of_descriptors: SIFT captures many descriptors most of which are unnecessary. This parameter determines the number of descriptors to capture with biggest blobs.
# Can be reduced to some extent with efficiency concerns.
# Folder_to_save: Descriptors are saved to a subfolder under ./descriptors. Name of the subfolder should be given.
# Function saves 3 files:
# * address.json: Mapping of descriptors to frames ({"352": 2} means the descriptor in row 352 is the first descriptor of frame 2)
# * descriptors.npy: A 2d numpy array where each row is a descriptor (which is a 128 byte array). Each frame has no_of_descriptors rows in this array.
# * angles.npy: A 2d array that keeps the principal angle of each keypoint in a frame in each row.
# (Each row has no_of_descriptors elements since there are no_of_descriptors keypoints for each frame. And there are as many rows as the number of frames captured.)
# Ex. interval = [20,40] and no_of_descriptors = 150
# Then the frames between the 20th and 40th seconds of the given video are analyzed.
# descriptors.npy will have the shape (150*20, 128) since each row is a descriptor and total number of descriptors is 150*20
# angles.npy will have the shape (20,150) since each row is a frame and each descriptor is a column
def captureDescriptors(path, fps, interval, folder_to_save, no_of_descriptors=150):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
start = interval[0]
end = interval[1]
detect = cv2.xfeatures2d.SIFT_create(no_of_descriptors)
all_desc = None
all_angles =[]
for i in range(start):
all_angles.append([])
first = True
rowcount = 0
frame_address = {} # the mapping from row of decriptors to the frame number
frame_count = start # we catch the frame by second
while success:
if (count / fps) >= end:
break
success, img = vidObj.read()
if (count / fps) < start:
count += 1
continue
# Catch the frames per second
if count % fps == 0:
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
keypoints, descriptors = detect.detectAndCompute(img,None)
angles = [int(key.angle) for key in keypoints]
all_angles.append(angles)
if first:
all_desc = descriptors
first = False
else:
all_desc = np.concatenate((all_desc, descriptors))
frame_address[rowcount] = frame_count
rowcount = rowcount + len(descriptors)
frame_count = frame_count + 1
count += 1
if not os.path.exists("./descriptors/"+folder_to_save):
os.mkdir("./descriptors/"+folder_to_save)
np.save("./descriptors/"+folder_to_save+"/angles", all_angles)
np.save("./descriptors/"+folder_to_save+"/descriptors", all_desc)
with open('./descriptors/'+folder_to_save+'/address.json', 'w') as fp:
json.dump(frame_address, fp)
print("Features saved")
# Path: Path to the video to analyze
# Fps: Fps of the video
# Interval: Array with two elements that indicate the start and end time of video to analyze ([420,840] between 7. and 14. mins)
# No_of_descriptors: SIFT captures many descriptors most of which are unnecessary. This parameter determines the number of descriptors to capture with biggest blobs
# Desc: descriptors.npy which is obtained by captureDescriptors()
# Sq: address.json which is obtained by captureDescriptors()
# Ang: angles.npy which is obtained by captureDescriptors()
# Ratio: When a descriptor is compared to a set of descriptors, we call the most similar pair a "match".
# To call it a "good match", we need that the distance of the match must me smaller than a ratio of the second best match.
# If ratio = 0.7, distances of first two matches are d1 and d2, the match with distance of d1 is a good match if d1 < 0.7*d2.
# We only count the good matches, thus ratio is an important parameter.
# Dumpfile: The file to write the matching results. (need to be a .csv)
# Function reads the given interval of the video, extracts the SIFT features of each frame, then compares the features with the ones in database.
# For our case, the database is given with desc, sq, ang. This can be changed. With the comparison, match results are written to a .csv file.
def analyzeFrames(path, interval, desc, sq, ang, no_of_descriptors, fps, dumpfile, ratio = 0.75):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
start = interval[0]
end = interval[1]
detect = cv2.xfeatures2d.SIFT_create(no_of_descriptors)
first = True
while success:
if (count / fps) >= end:
break
success, img = vidObj.read()
if (count / fps) < start:
count += 1
continue
# Catch the frames per second
if count % fps == 0:
frame_no = int(count/fps)
print(frame_no)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
new_keypoints, new_descriptors = detect.detectAndCompute(img,None)
angles = [int(key.angle) for key in new_keypoints]
d = np.array(cdist(new_descriptors, desc))
matches, matched, glob_match = getMatchFromDistance(sq, d, ratio)
startidx = 0
for key, value in sq.items():
if value == matched:
startidx = int(key)
break
matched_ang1 = []
matched_ang2 = []
for m in glob_match:
new_idx = m[0]
old_idx = m[1]
if old_idx>=startidx and old_idx <startidx + no_of_descriptors:
idx = old_idx - startidx
angle1 = angles[new_idx]
angle2 = ang[matched][idx]
matched_ang1.append(angle1)
matched_ang2.append(angle2)
angle, _ = detectAngle(matched_ang1, matched_ang2)
writeMatches(frame_no, len(sq), matches, matched, angle, first, dumpfile)
if first:
first = False
count += 1
# d: The distance matrix between descriptors of a frame and the set of descriptors in the database.
# Shape of d is (n,m) if current frame has n descriptors and there are m descriptors in database.
# d_ij = Distance between the ith descriptor of the frame and jth descriptor in the database.
# Function returns 3 things:
# * matches: An array that counts the number of matches between the current frame and each of the frames in database.
# * matched: argmax(matches) , the frame that is the best match of the current frame (test frame)
# * glob_match: An array of tuples where each element (i,j) is a pair of indices of matched descriptors.
# (i,j) means that ith descriptor of test frame is matched with jth descriptor in database. We get this to find relative angles.
def getMatchFromDistance(sq, d, ratio):
rows, _ = d.shape
matches = [0 for _ in range(len(sq))]
indices = []
glob_match = []
for i in range(rows):
row = d[i]
min1, min2 = np.partition(row, 1)[0:2]
if min1 < ratio*min2:
# means this is a good match
idx = np.where(row == min1)[0][0]
indices.append(idx)
glob_match.append((i,idx))
for idx in indices:
last = '0'
for k in sq:
if idx > int(k):
last = k
continue
else:
matched_square = sq[last]
matches[matched_square] += 1
break
matched = np.argmax(matches)
return matches, matched, glob_match
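# Editor's illustrative example of the ratio test above (hypothetical distances):
# if the two smallest distances in a row are min1 = 120 and min2 = 200, then with
# ratio = 0.75 the test 120 < 0.75 * 200 = 150 passes and the match is counted as
# good; with min1 = 180 the match would be discarded as ambiguous.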
# http://amroamroamro.github.io/mexopencv/matlab/cv.SIFT.detectAndCompute.html
# Gets two arrays of angles to compare. Arrays have one to one correspondence. That is, ith elements of both arrays belong to matched keypoints.
# Difference between each corresponding pair of angles is calculated.
# The most common difference is inferred to be the relative angle between test frame and matched database frame.
def detectAngle(angles1, angles2):
counter = np.array([0 for i in range(360) ])
for i in range(len(angles1)):
diff = angles1[i] - angles2[i]
if diff < 0:
diff += 360
counter[diff] += 1
return np.argmax(counter), np.max(counter)
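# Worked example (illustrative only): angle pairs that mostly differ by 30 degrees vote
# into the 360-bin histogram; the second pair (5 vs 335) shows a negative difference
# wrapping to 30 via the +360, so detectAngle returns (30, 2) for this input.
def _detect_angle_example():
    return detectAngle([40, 5, 100], [10, 335, 80])  # -> (30, 2)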
# Matching results are accumulated in a pickle stored at the .csv path and converted to a real CSV at the end of the run.
def writeMatches(frame_no, no_of_frames, matches, matched, angle, first, dumpfile):
if not os.path.exists("./matches"):
os.mkdir("./matches")
total_matches = sum(matches)
max_match = matches[matched]
if not first:
df = pd.read_pickle(dumpfile)
else:
columns = ["Frame no","Matched Frame", "Angle" ,"Total Matches", "Max Match"]
for i in range(no_of_frames):
columns.append(i)
df = pd.DataFrame(columns=columns)
dic = {"Frame no": [frame_no], "Matched Frame": [matched], "Angle":[angle], "Total Matches":[total_matches], "Max Match":[max_match]}
for i in range(no_of_frames):
dic[i] = [matches[i]]
df2 = pd.DataFrame(dic, index=[0])
df = pd.concat([df, df2], sort=False)
df.to_pickle(dumpfile)
# folder name of the run, will appear under matches directory
folder = "whitesquares"
# parameters of captureDescriptors()
train_video = "./videos/karolar_2.mov"
train_fps = 30
train_interval = [0,430]
train_descriptors = 150
# parameters of analyzeFrames()
query_video = "./videos/karolar_2.mov"
query_fps = 30
query_interval = [430,1320]
query_descriptors = 150
ratio = 0.75
# make it false if the descriptors in the database are being used
train = True
test = False
if train:
captureDescriptors(path = train_video,fps = train_fps, interval = train_interval, folder_to_save = folder, no_of_descriptors = train_descriptors)
if test:
with open('./descriptors/'+folder+'/address.json', 'r') as fp:
sq = json.load(fp)
with open('./descriptors/'+folder+'/descriptors.npy', 'rb') as f:
desc = np.load(f)
with open('./descriptors/'+folder+'/angles.npy', 'rb') as f:
ang = np.load(f,allow_pickle=True)
    analyzeFrames(path = query_video, interval = query_interval, desc = desc, sq = sq, ang = ang, no_of_descriptors = query_descriptors,
                  fps = query_fps, dumpfile = './matches/'+folder+'.csv', ratio = ratio)
df = pd.read_pickle("./matches/"+folder+".csv")
df.to_csv("./matches/"+folder+".csv")
|
orhungorkem/SIFTDetector
|
main.py
|
main.py
|
py
| 11,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17944337782
|
import json
from django.views.generic import DetailView, ListView, View, CreateView
from django.core.exceptions import ImproperlyConfigured
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseRedirect
)
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from .models import Suggestion, SuggestionCopy
from .forms import AddSuggestionForm
class IndexView(DetailView):
"""
The landing page for the site
"""
model = Suggestion
template_name = 'suggestions/index.html'
def get_context_data(self, **kw):
ctx = super(IndexView, self).get_context_data(**kw)
ctx['recent_suggestions'] = SuggestionCopy.objects.all()[:5]
return ctx
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
try:
return queryset.order_by('?')[0]
except IndexError:
raise ImproperlyConfigured('No suggestions are installed')
def render_to_response(self, ctx, **kw):
if 'format' in self.request.GET:
if self.request.GET['format'] == 'json':
return HttpResponse(
json.dumps(
{
'id': ctx['object'].id,
'suggestion': str(ctx['object']),
'url': ctx['object'].get_absolute_url(),
'split_text': ctx['object'].split()
}
),
content_type='application/json'
)
return HttpResponseBadRequest('Format not supported')
return super(IndexView, self).render_to_response(ctx, **kw)
class SuggestionView(DetailView):
"""
A view for a single Suggestion
"""
model = Suggestion
class LoginRequiredMixin(object):
"""
Mixin to ensure a user is logged in; basically applies the login_required
decorator from auth module.
"""
@method_decorator(login_required)
def dispatch(self, *ar, **kw):
return super(LoginRequiredMixin, self).dispatch(*ar, **kw)
class GetSuggestionCopyQSMixin(object):
"""
We want to get the 10 latest suggestions that the user has had copied
or if there are none we create a new one.
"""
def get_queryset(self):
queryset = self.request.user.suggestions.all()[:10]
if not queryset.count():
SuggestionCopy.objects.create_random_for_user(self.request.user)
return self.request.user.suggestions.all()[:10]
return queryset
class UserView(LoginRequiredMixin, GetSuggestionCopyQSMixin, ListView):
"""
The logged in user's view
"""
template_name = 'suggestions/user.html'
class JSONResponseMixin(object):
def render_to_response(self, ctx, **kw):
return HttpResponse(json.dumps(ctx), 'application/json')
class SkipSuggestionView(
LoginRequiredMixin,
GetSuggestionCopyQSMixin,
JSONResponseMixin,
View
):
"""
Skip over the current suggestion for the user and return a new suggestion
"""
def get(self, request, *ar, **kw):
self.get_queryset()[0].delete()
SuggestionCopy.objects.create_random_for_user(self.request.user)
queryset = self.get_queryset()
return self.render_to_response({'suggestion': queryset[0].data})
class GetSuggestionCopySingleMixin(object):
def get_queryset(self):
queryset = self.request.user.suggestions.all()
if not queryset.count():
SuggestionCopy.objects.create_random_for_user(self.request.user)
return self.request.user.suggestions.all()
return queryset
def get_object(self, id):
return get_object_or_404(self.get_queryset(), pk=id)
class ActionSuggestionView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Mark the current suggestion for the user as actioned and return a new
suggestion
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
obj.suggestion.actioned_by.add(request.user)
suggestion = SuggestionCopy.objects.create_random_for_user(
request.user
)
return self.render_to_response({'suggestion': suggestion.data})
class LikeSuggestionView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Mark a suggestion as liked by the user and return the amount of likes
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
obj.suggestion.liked_by.add(request.user)
return self.render_to_response(
{'likes': obj.suggestion.liked_by.count()}
)
class PutBackView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Put a crossed suggestion back to current by making a copy of it
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
self.get_queryset()[0].delete()
suggestion = SuggestionCopy.objects.create_from_suggestion_for_user(
obj.suggestion,
request.user
)
return self.render_to_response({'suggestion': suggestion.data})
class UpdateTextView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Update the text for "them" in the selected suggestion
"""
def post(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
        if 'text' not in request.POST:
return HttpResponseBadRequest('No text supplied')
obj.them_text = request.POST['text']
obj.save()
return self.render_to_response({'status': 'success'})
class AddSuggestionView(LoginRequiredMixin, CreateView):
"""
Allow a logged in user to add their own suggestion for review that can
be added (by an admin) to the pool of suggestions given on the site
"""
template_name = 'suggestions/add.html'
form_class = AddSuggestionForm
def get_success_url(self):
return reverse('suggestions:add')
def form_valid(self, form):
form.save(self.request.user)
messages.success(self.request, _('Thank you for your suggestion'))
return HttpResponseRedirect(self.get_success_url())
|
rvause/djangodash2013
|
suggestions/views.py
|
views.py
|
py
| 6,614 |
python
|
en
|
code
| 3 |
github-code
|
6
|
23588347895
|
from time import sleep
import psycopg2
import os
import subprocess
from datetime import datetime
from pytz import timezone
import filecmp
db_name = os.environ['POSTGRES_DB']
db_user = os.environ['POSTGRES_USER']
db_pass = os.environ['PGPASSWORD']
db_host = os.environ['POSTGRES_HOST']
db_port = os.environ['POSTGRES_PORT']
#frequency = 18000 if os.environ['BACKUP_INTERVAL'] is None else os.environ['BACKUP_INTERVAL']
#/mnt/data/db
KEEP_ALL = 0
def purgeBackup(old_file,new_file):
if filecmp.cmp(old_file,new_file) == True:
os.remove(old_file)
def loadBackup(backup_file):
ps = subprocess.Popen(
['psql', '-h', db_host, '-U', db_user, '-d', db_name, '-f', backup_file],
stdout=subprocess.PIPE
)
output = ps.communicate()[0]
for line in output.splitlines():
print(line)
def getFileInfo(path):
result={}
result['path'] = path
# file modification timestamp of a file
m_time = os.path.getmtime(path)
# file creation timestamp in float
c_time = os.path.getctime(path)
# convert timestamp into DateTime object
result['modifiedOn'] = datetime.fromtimestamp(m_time)
# convert creation timestamp into DateTime object
result['createdOn'] = datetime.fromtimestamp(c_time)
return result
#get last backup
data = [getFileInfo('/mnt/data/db/{}'.format(item)) for item in os.listdir("/mnt/data/db") if item.endswith(".sql")]
sorted_data=sorted(data, key=lambda i: i['modifiedOn'],reverse=True)
last_backup = sorted_data[0]['path']
print(f"Last backup: {last_backup}")
#Connecto to the database
db_string = 'postgres://{}:{}@{}:{}/{}'.format(db_user, db_pass, db_host, db_port, db_name)
print(db_string)
db = psycopg2.connect(db_string)
cursor = db.cursor()
cursor.execute("SELECT version();")
version = cursor.fetchone()
print(version)
cursor.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public'")
count = cursor.fetchone()
if len(count) > 0 and count[0] == 0:
print("*** RECREATE DB from Backup! ****")
print('loading {}'.format(last_backup))
loadBackup(last_backup)
else:
print("continue....")
db.close()
while True:
tz = timezone('EST')
x = datetime.now(tz)
new_backup=f'/mnt/data/db/postgres-backup-{x.strftime("%m%d%y%H%M%S")}.sql'
print('Backing up %s database to %s' % (db_name, new_backup))
ps = subprocess.Popen(
['pg_dump', '-h', db_host, '-U', db_user, '-d', db_name, '-f', new_backup],
stdout=subprocess.PIPE)
output = ps.communicate()[0]
for line in output.splitlines():
print(line)
if KEEP_ALL == 0:
purgeBackup(last_backup,new_backup)
last_backup = new_backup
sleep(3600)
|
cjrisua/vinomio-api
|
docker/vinomioHC/app.py
|
app.py
|
py
| 2,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6056727556
|
#!/usr/bin/env python3
import re
import collections
import vcf
import sys
import argparse
import copy
def parse_my_args():
parser = argparse.ArgumentParser("Combines a VCF of individual calls into one large VCF for population.")
parser.add_argument("vcf", nargs="?", help="Input VCF file; default stdin.")
parser.add_argument("-n", "--name", help="name of combined population.", required=True)
args = parser.parse_args()
return(args)
def get_arg_vars(args):
if args.vcf:
inconn = open(args.vcf, "r")
else:
inconn = sys.stdin
name = args.name
return(inconn, name)
def combine_vcf(vcfin, name, outwriter):
regex = re.compile(r"[/|]")
for record in vcfin:
calls = [call for call in record]
ad1 = 0
ad2 = 0
for call in calls:
callgt = regex.split(call["GT"])
if len(callgt) == 1:
inc = 2
else:
inc = 1
for i in callgt:
if i=="0":
ad1 += inc
if i=="1":
ad2 += inc
if ad1 >0 and ad2 > 0:
gt = "0|1"
elif ad2 > 0:
gt = "1"
else:
gt = "0"
writeout(gt, ad1, ad2, name, record, outwriter)
#print(cmh.summary())
#control_ad0, control_ad1 = [calls.AD[:2] for i in control]
#test_ad0, test_ad1 = [calls.AD[:2] for i in test]
def writeout(gt, ad1, ad2, name, record, writer):
newrecord = copy.deepcopy(record)
newrecord.samples = []
CallData = collections.namedtuple("CallData", ["GT", "AD"])
mycalldat = CallData(GT = str(gt), AD = [str(ad1), str(ad2)])
newrecord.samples.append(vcf.model._Call(newrecord, name, mycalldat))
writer.write_record(newrecord)
def main():
args = parse_my_args()
inconn, name = get_arg_vars(args)
vcfin = vcf.Reader(inconn)
outwriter = vcf.Writer(sys.stdout, vcfin)
combine_vcf(vcfin, name, outwriter)
inconn.close()
if __name__ == "__main__":
main()
#>>> for i in b:
#... for j in i:
#... try:print(j.data.AD)
|
jgbaldwinbrown/vcfstats
|
combine_single_indivs.py
|
combine_single_indivs.py
|
py
| 2,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11002868168
|
from typing import List
class WordFilter:
def __init__(self, words: List[str]):
self.a ={}
for ind, i in enumerate(words):
for j in range(len(i) + 1):
for k in range(len(i) + 1):
now = i[:j] + '$' + i[k:]
self.a[now] = ind
def f(self, prefix: str, suffix: str) -> int:
k = prefix + '$' + suffix
return self.a.get(k, -1)
if __name__ == '__main__':
words = ["apple"]
prefix = "a"
suffix = "e"
obj = WordFilter(words)
print(obj.f(prefix, suffix))
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
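# Complexity note (illustrative, not part of the solution): for a word of length L the
# constructor stores (L + 1) ** 2 combined "prefix$suffix" keys, so memory is O(N * L^2)
# for N words while every f() query stays a single dict lookup.
def _key_count_example():
    return len(WordFilter(["apple"]).a)  # -> 36, i.e. (5 + 1) ** 2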
|
xixihaha1995/CS61B_SP19_SP20
|
745. Prefix and Suffix Search.py
|
745. Prefix and Suffix Search.py
|
py
| 702 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38046454992
|
#!/usr/bin/env python3
import os
import sys
import rzpipe
curdir = os.path.dirname(os.path.realpath(__file__))
rz = rzpipe.open(curdir + "/ls", ["-2"])
# print(rzpipe.__file__)
# print(rzpipe.VERSION)
rz.cmd("aa")
sys.stdout.write("/bin/ls ")
pi1 = rz.cmd("pi 1 @e:scr.color=0").strip()
if pi1 == "push rbp":
print("OK")
else:
print("FAIL")
# print(pi1)
# print(rz.cmd("pd 10"));
rz.quit()
|
rizinorg/rz-pipe
|
python/examples/test.py
|
test.py
|
py
| 407 |
python
|
en
|
code
| 26 |
github-code
|
6
|
4732908565
|
import xml.etree.ElementTree as ET
from datetime import datetime
from bs4 import BeautifulSoup
class XMLParser:
def __init__(self, file: str):
self.file = file
self.parameters = {'INPUT':{},
'DISCRIMINATOR':{},
'QDC':{},
'SPECTRA':{},
'REJECTIONS':{},
'ENERGY_CALIBRATION':{},
'SYNC':{},
'HARDWARE_COINCIDENCE':{},
'MISC':{}}
self.groups = list(self.parameters.keys())
self.reformatted = ['SRV_PARAM_CH_POLARITY','SRV_PARAM_CH_BLINE_NSMEAN','HARDWARE_COINCIDENCE','SRV_PARAM_START_MODE','SRV_PARAM_CH_SPECTRUM_NBINS','SRV_PARAM_CH_INDYN','SRV_PARAM_CH_CFD_FRACTION','SRV_PARAM_CH_DISCR_MODE','SRV_PARAM_CH_ENERGY_COARSE_GAIN','SRV_PARAM_TRGOUT_MODE']
self.reformatted_keys = ['polarity', 'baseline', 'coincidence', 'start', 'ebins', 'input_range', 'cfd', 'discriminator', 'coarse_gain', 'trig_out']
self.formatted = 0
self.board_formatted = False
def get_board_properties(self):
root = ET.parse(self.file).getroot()
name = root.find('board/label').text
id = root.find('board/id').text
model = root.find('board/modelName').text
adc_bits = root.find('board/adcBitCount').text
sample_rate = int(root.find('board/sampleTime').text)*10**6
dpp_type = root.find('board/dppType').text
roc = root.find('board/rocFirmware/major').text + '.' + root.find('board/rocFirmware/minor').text + ' build ' + str(hex(int(root.find('board/rocFirmware/build').text))).split('0x')[-1].zfill(4)
amc = root.find('board/amcFirmware/major').text + '.' + root.find('board/amcFirmware/minor').text + ' build ' + str(hex(int(root.find('board/amcFirmware/build').text))).split('0x')[-1].zfill(4)
link = root.find('board/connectionType').text + ' link #' + root.find('board/address').text
status = root.find('board/active').text
if status == 'true':
status = True
if status == 'false':
status = False
return name, id, model, adc_bits, sample_rate, dpp_type, roc, amc, link, status
def reformat(self, list_of_params):
"""
Reformats the values of the XML file.
Args:
list_of_params (list): Contains `polarity`, `baseline`, `coincidence`, `start`, `ebins`, `input_range`, `cfd`, `discriminator`, `coarse_gain`, 'trig_out`.
"""
if 'all' in list_of_params:
list_of_params.clear()
list_of_params = ['polarity', 'baseline', 'coincidence', 'start', 'ebins', 'input_range', 'cfd', 'discriminator', 'coarse_gain', 'trig_out']
#Formatting of the text values
if 'polarity' in list_of_params:
pol = self.parameters['INPUT']['SRV_PARAM_CH_POLARITY']
real_pol = pol.split('_')[-1]
self.parameters['INPUT']['SRV_PARAM_CH_POLARITY'] = real_pol
if 'baseline' in list_of_params:
bline = self.parameters['INPUT']['SRV_PARAM_CH_BLINE_NSMEAN']
real_bline = bline.split('_')[-1]
self.parameters['INPUT']['SRV_PARAM_CH_BLINE_NSMEAN'] = real_bline
if 'coincidence' in list_of_params:
coinc_mode = self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_MODE']
real_coinc_mode = coinc_mode[11:]
self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_MODE'] = real_coinc_mode
if 'start' in list_of_params:
start_mode = self.parameters['SYNC']['SRV_PARAM_START_MODE']
real_start_mode = start_mode[11:]
self.parameters['SYNC']['SRV_PARAM_START_MODE'] = real_start_mode
if 'ebins' in list_of_params:
energy_bins = self.parameters['SPECTRA']['SRV_PARAM_CH_SPECTRUM_NBINS']
real_energy_bins = energy_bins[5:] +'.0'
self.parameters['SPECTRA']['SRV_PARAM_CH_SPECTRUM_NBINS'] = real_energy_bins
if 'input_range' in list_of_params:
input_range = self.parameters['INPUT']['SRV_PARAM_CH_INDYN']
input_range = input_range.split('_')[1:]
real_input_range = input_range[0] + '.' + input_range[1]
self.parameters['INPUT']['SRV_PARAM_CH_INDYN'] = real_input_range
if 'cfd' in list_of_params:
cfd_frac = self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_FRACTION']
real_cfd_frac = cfd_frac.split('_')[-1]
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_FRACTION'] = real_cfd_frac
if 'discriminator' in list_of_params:
disc_mode = self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_DISCR_MODE']
disc_mode = disc_mode.split('_')[-1]
            if disc_mode == "LED":
                real_disc_mode = "Leading Edge Discriminator"
            elif disc_mode == "CFD":
                real_disc_mode = "Constant Fraction Discriminator"
            else:
                real_disc_mode = disc_mode  # fall back to the raw value for unknown modes
            self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_DISCR_MODE'] = real_disc_mode
if 'coarse_gain' in list_of_params:
coarse_gain = self.parameters['QDC']['SRV_PARAM_CH_ENERGY_COARSE_GAIN']
self.parameters['QDC']['SRV_PARAM_CH_ENERGY_COARSE_GAIN'] = coarse_gain.split('_')[1]
if 'trig_out' in list_of_params:
trig_out = self.parameters['SYNC']['SRV_PARAM_TRGOUT_MODE'].split('_')[2:]
real_trig_out =""
for elem in trig_out:
real_trig_out += elem + ' '
self.parameters['SYNC']['SRV_PARAM_TRGOUT_MODE'] = real_trig_out
#FIXING THE VALUES THAT ARE IN NANOSECONDS TO SECONDS FOR SPINMOB AUTOSCALING
if self.formatted == 0:
self.parameters['INPUT']['SRV_PARAM_RECLEN'] = float(self.parameters['INPUT']['SRV_PARAM_RECLEN'])*10**(-9)
self.parameters['INPUT']['SRV_PARAM_CH_PRETRG'] = float(self.parameters['INPUT']['SRV_PARAM_CH_PRETRG'])*10**(-9)
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_TRG_HOLDOFF'] = float(self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_TRG_HOLDOFF'])*10**(-9)
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_DELAY'] = float(self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_DELAY'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATE'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATE'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATESHORT'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATESHORT'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATEPRE'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATEPRE'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T0'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T0'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T1'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T1'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T0'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T0'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T1'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T1'])*10**(-9)
self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_TRGOUT'] = float(self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_TRGOUT'])*10**(-9)
self.parameters['REJECTIONS']['SW_PARAMETER_CH_ENERGYCUTENABLE'] = (True if self.parameters['REJECTIONS']['SW_PARAMETER_CH_ENERGYCUTENABLE'] == 'true' else False)
self.formatted += 1
def get_parameters(self):
"""
Gets the board parameters (shared parameters for all channels).
"""
root = ET.parse(self.file).getroot()
board_parameters = root.find('board/parameters')
for entry in board_parameters:
key = entry.find('key').text
value = entry.find('value/value').text
group = entry.find('value/descriptor/group').text
if value == 'true':
value = True
if value == 'false':
value = False
for tab in self.groups:
if group == tab:
#if units is None:
self.parameters[tab][key] = value
#else:
#self.parameters[tab][key] = [value, units]
self.formatted = 0
self.reformat(['all'])
return self.parameters
    def get_chn_parameters(self, chn_number: int):
root = ET.parse(self.file).getroot()
channels = root.findall('board/channel')
channel_in_use = channels[chn_number]
# keys = channel_in_use.findall('values/entry/key')
# values = channel_in_use.findall('values/entry/value')
entries = channel_in_use.findall('values/entry')
entries_with_vals = []
for index, entry in enumerate(entries):
if entry.find('value') is not None:
entries_with_vals.append(entries[index])
keys = []
values = []
for entry in entries_with_vals:
keys.append(entry.find('key'))
values.append(entry.find('value'))
list_format = []
for key in keys:
if key.text in self.reformatted:
list_format.append(self.reformatted_keys[self.reformatted.index(key.text)])
for group in self.parameters:
for index, key in enumerate(keys):
if key.text in self.parameters[group]:
if 'true' in values[index].text or 'false' in values[index].text:
values[index].text = (True if values[index].text == 'true' else False)
else:
self.parameters[group][key.text] = values[index].text
# self.formatted = 0
# self.reformat(list_format)
return self.parameters
    def get_ch_label(self, chn_number: int):
root = ET.parse(self.file).getroot()
channels = root.findall('board/channel')
channel_to_check = channels[chn_number]
index = channel_to_check.find('index').text
entries = channel_to_check.findall('values/entry')
for entry in entries:
if entry.find('key').text == "SW_PARAMETER_CH_LABEL":
if entry.find('value') is not None:
label = entry.find('value').text
break
else: # I'm a genius. Don't mind me using disgusting functions in python.
label = "CH" #This is executed if the loop ends normally (so without encountering the break above.)
return (index, label)
class InfoParser:
def __init__(self, file: str):
self.file = file
def get_run_info(self):
with open(self.file) as f:
informations = f.readlines()[0:4]
self.id = informations[0].split('=')[-1][:-1]
self.time_start = datetime.strptime(informations[1].split('=')[-1].split('.')[0], "%Y/%m/%d %H:%M:%S")
self.time_stop = datetime.strptime(informations[2].split('=')[-1].split('.')[0], "%Y/%m/%d %H:%M:%S")
self.time_real = self.time_stop - self.time_start
return self.id, self.time_start, self.time_stop, self.time_real
if __name__ == '__main__':
file = "C:\\Users\\chloe\\OneDrive - McGill University\\Coincidence Testing\\Co60 Spectrums with different settings\\DAQ\\4096Chns-20lsb(LE)-80Gain-(300.80.50)-150s\\settings.xml"
test = XMLParser(file)
test.get_parameters()
print(test.get_ch_label(2))
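    # Illustrative sketch (assumed layout): a run.info file written next to settings.xml
    # can be fed to InfoParser to recover the run id and elapsed time; guarded so the
    # demo stays harmless when that file is absent.
    import os
    info_file = file.replace("settings.xml", "run.info")
    if os.path.exists(info_file):
        run_id, t_start, t_stop, t_real = InfoParser(info_file).get_run_info()
        print(run_id, t_real)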
|
Chujo58/ReadROOT
|
XML_Parser.py
|
XML_Parser.py
|
py
| 11,752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9642933289
|
import socket
udpsocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
udpsocket.bind(("localhost",7777))
print("UDP server is up and listening")
pair = udpsocket.recvfrom(1024)
res = pair[0]
add = pair[1]
print("Response from client :")
print("Message Received: ", res.decode())
print("Address of client: ", add, end = '\n\n')
udpsocket.sendto(str.encode("Connected to Server!"), add)
while(True):
pair = udpsocket.recvfrom(1024)
res = pair[0]
add = pair[1]
print("Client: ", res.decode())
message = input("ME: ")
udpsocket.sendto(str.encode(message), add)
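# Minimal matching client sketch (illustrative only, not part of this repo). The server
# loop above never returns, so this function is just documentation of the wire protocol:
# plain UTF-8 datagrams to localhost:7777, client speaks first, then strict turn taking.
def example_client(host="localhost", port=7777):
    client = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    client.sendto(str.encode("Hello server"), (host, port))
    print(client.recvfrom(1024)[0].decode())
    while True:
        client.sendto(str.encode(input("ME: ")), (host, port))
        print("Server:", client.recvfrom(1024)[0].decode())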
|
vaibhav477/TCP_chat_app
|
UDP_Implementation/server.py
|
server.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20602647000
|
import sys
sys.path.append('../')
# sys.path.insert(0, '/path/to/application/app/folder')
# from flask import Flask
#from flask_testing import TestCase
import unittest
from unittest import TestCase
import read_ini
# class MyTest(TestCase):
# def create_app(self):
# app = Flask(__name__)
# app.config['TESTING'] = True
# return app
#
class TestDBcon(TestCase):
def setUp(self):
TestCase.setUp(self)
def test_settings_ini_has_all_required_keys(self):
expected_keys = ['name', 'age' , 'height']
filename = '../settings.ini'
ini_to_dict = read_ini.convert_settings_ini_to_dict(filename)
keys_retrived_from_ini_file = list(ini_to_dict) # or [*ini_to_dict]
for k in expected_keys:
self.assertIn(k,
keys_retrived_from_ini_file,
'we got %s' %
keys_retrived_from_ini_file)
# self.assertListEqual(keys_retrived_from_ini_file,
# expected_keys, 'we got %s' %
# keys_retrived_from_ini_file)
def test_get_value_from_settings_ini_file(self):
expected_result = 'bhujay'
self.assertEqual(read_ini.get_value_from_settings_ini_file('name'),
expected_result, 'Test result is % s' %
read_ini.get_value_from_settings_ini_file('name'))
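# Minimal sketch of the API under test, inferred from the assertions above; the real
# read_ini.py lives one directory up and may differ. Kept here only as documentation.
def _sketch_convert_settings_ini_to_dict(filename, section='DEFAULT'):
    import configparser
    parser = configparser.ConfigParser()
    parser.read(filename)
    return dict(parser[section])
def _sketch_get_value_from_settings_ini_file(key, filename='../settings.ini'):
    return _sketch_convert_settings_ini_to_dict(filename).get(key)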
if __name__ == '__main__':
unittest.main()
|
BhujayKumarBhatta/flask-learning
|
flaskr/tests/test_read_ini.py
|
test_read_ini.py
|
py
| 1,580 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74530690747
|
#!/usr/bin/env python3
"""antipatibot, discord server."""
import asyncio
import logging
import os
import secrets
from dataclasses import dataclass
import discord
from discord.ext import commands
import yt_dlp as youtube_dl
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': False,
    'extract_flat': 'in_playlist',  # don't extract stream urls / thumbnails for playlists
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
"""Youtube source class, which allows the bot to play youtube videos"""
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
"""Returns an audio from a youtube link."""
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# Take first item from a playlist.
# This should never happen, since we handle this in the play command,
# but better safe than sorry.
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
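# Illustrative sketch (not used by the bot): how the play() command below interprets
# ytdl.extract_info() results. Because of 'extract_flat': 'in_playlist', a playlist comes
# back as a dict with an "entries" list of flat items, while a single video is a plain
# dict. The example URL is a placeholder.
def _example_extract(url="https://www.youtube.com/watch?v=dQw4w9WgXcQ"):
    info = ytdl.extract_info(url, download=False)
    songs = info["entries"] if "entries" in info else [info]
    return [(song.get("title"), song.get("url")) for song in songs]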
@dataclass()
class BotSettings:
"""Bot settings associated to each antipatibot instance."""
discord_token: str = os.getenv("ANTIPATIBOT_DISCORD_TOKEN", "")
command_prefix: str = os.getenv("ANTIPATIBOT_COMMAND_PREFIX", "!")
max_queue_size: int = int(os.getenv("ANTIPATIBOT_MAX_QUEUE_SIZE", "1000"))
idle_timeout: int = int(os.getenv("ANTIPATIBOT_IDLE_TIMEOUT", "300"))
@dataclass()
class GuildData:
"""Data associated to each guild: song queue, music task and lock."""
guild_id: int = None
lock: asyncio.Lock = asyncio.Lock()
queue: asyncio.Queue = None
task: asyncio.Task = None
loop: bool = False
def __init__(self, guild_id: int, max_queue_size: int, task: asyncio.Task = None):
self.guild_id = guild_id
self.task = task
self.queue = asyncio.Queue(max_queue_size)
# pylint: disable=R0201,R0904
class AntipatiBot(commands.Cog):
"""AntipatiBot's collection of command."""
def __init__(self, bot, log, settings: BotSettings):
self.bot = bot
self.log = log
self.settings = settings
self.guild_data = {}
async def cog_command_error(self, ctx, error):
message = ctx.message.content
self.log.error(f"command_error:{ctx.guild.id}:{self.log.sanitize(ctx.author)}" +
f":{ctx.command}:{ctx.author.id}:{message}:{error}")
await ctx.message.reply("Invalid command.")
async def cog_before_invoke(self, ctx):
self.log.info(
f"command:{ctx.guild.id}:{self.log.sanitize(ctx.author)}:{ctx.author.id}:{ctx.command}")
@commands.Cog.listener()
async def on_ready(self):
"""Triggers when the bot is ready to run, used to log basic information."""
self.log.info("login:%s", self.bot.user)
for guild in self.bot.guilds:
self.log.info("joined_guild:%d:%s", guild.id, self.log.sanitize(guild.name))
self.guild_data[guild.id] = GuildData(guild.id, self.settings.max_queue_size)
async def ensure_guild_thread(self, guild_id: int):
"""Ensure the music_player_loop thread is running for the given guild_id"""
guild_data = self.guild_data[guild_id]
async with guild_data.lock:
if guild_data.task is None:
guild_data.task = asyncio.create_task(self.music_player_loop(guild_data))
async def terminate_guild_thread(self, guild_id: int):
"""Kill the music_player_loop thread for the given guild_id"""
guild_data = self.guild_data[guild_id]
async with guild_data.lock:
if guild_data.task is not None:
guild_data.task.cancel()
guild_data.queue = asyncio.Queue(self.settings.max_queue_size)
guild_data.loop = False
guild_data.task = None
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState,
after: discord.VoiceState):
"""Triggers when the bot joins or leaves a voice channel.
Starts the music_player_loop for the given guild."""
guild_id = member.guild.id
if self.bot.user == member:
"""Ensure the music_player_loop is alive/dead"""
if after.channel is not None:
await self.ensure_guild_thread(guild_id)
elif after.channel is None:
await self.terminate_guild_thread(guild_id)
else:
"""Terminate when you are left alone on a channel"""
voice = discord.utils.get(self.bot.voice_clients, guild__id=guild_id)
if voice is not None and len(voice.channel.members) < 2:
# FIXME: possible race condition on idle timeout
await voice.disconnect()
await self.terminate_guild_thread(guild_id)
async def music_player_loop(self, guild_data: GuildData):
"""Task which handles the queue list, cross-guild in theory (wip)."""
self.log.info(f"music_player_loop:{guild_data.guild_id}:start")
while True:
try:
(song_request, ctx) = \
await asyncio.wait_for(guild_data.queue.get(), self.settings.idle_timeout)
self.log.info("song request: " + str(song_request))
player = await YTDLSource.from_url(song_request, loop=self.bot.loop, stream=True)
playing_current_song = asyncio.Event()
def on_song_end(error):
if error is not None:
self.log.error("Player error: %s", error)
playing_current_song.set()
ctx.voice_client.play(player, after=on_song_end)
await ctx.send(f"Now playing: {player.title}")
await playing_current_song.wait()
if guild_data.loop:
try:
guild_data.queue.put_nowait((song_request, ctx))
except asyncio.QueueFull:
pass
except asyncio.CancelledError:
self.log.info(f"music_player_loop:{guild_data.guild_id}:cancelled")
return
except asyncio.TimeoutError:
self.log.info(f"music_player_loop:{guild_data.guild_id}:timeout")
voice = discord.utils.get(self.bot.voice_clients, guild__id=guild_data.guild_id)
if voice is not None:
await voice.disconnect()
return
except Exception as exception: # pylint: disable=W0703
self.log.warning(f"music_player_loop:{guild_data.guild_id}:uncaught exception: {exception}")
@commands.command()
async def join(self, ctx, *, channel: discord.VoiceChannel = None):
"""
Either join a given voice channel or move to the author voice channel.
If no channel is specified, connect to the user's current voice channel.
"""""
if channel is None:
if ctx.author.voice is None:
return await ctx.message.reply("You are not connected to a voice channel.")
channel = ctx.author.voice.channel
if ctx.voice_client is not None:
await ctx.voice_client.move_to(channel)
else:
await channel.connect()
await self.ensure_guild_thread(ctx.guild.id)
@commands.command(aliases=["cicca"])
async def cichero(self, ctx):
"""Great classic."""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=DAuPe14li4g")
@commands.command(aliases=["jhon"])
async def john(self, ctx):
"""He truly is."""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=dALcFSyFcXs")
@commands.command()
async def bastardo(self, ctx):
"""Mia mamma è una brava donna, napoletana ma brava"""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=12gmyUCqLxA")
@commands.command(aliases=["p", "youtube", "yt"])
async def play(self, ctx, *, song_request: str):
"""Add youtube song to playlist."""
async with ctx.typing():
guild_data = self.guild_data[ctx.guild.id]
songs = [ytdl.extract_info(song_request, download=False)]
if "entries" in songs[0]:
# YouTube playlist
songs = list(song for song in songs[0]["entries"])
for song in songs:
try:
guild_data.queue.put_nowait((song["url"], ctx))
except asyncio.QueueFull:
await ctx.message.reply(
f"Song queue is full (Max size: {guild_data.queue.maxsize})")
return
await ctx.message.reply("Song added to the queue" if len(songs) == 1
else f"Added {len(songs)} songs to the queue.")
@commands.command(aliases=["clear", "clean", "hairottoilcazzo"])
async def stop(self, ctx, *, reply=True):
"""Clear the queue and stop playing music"""
guild_data = self.guild_data[ctx.guild.id]
try:
while True:
guild_data.queue.get_nowait()
except asyncio.QueueEmpty:
await self.skip(ctx)
if reply:
await ctx.message.reply("Song queue cleared and music stopped")
@commands.command(aliases=["kill", "terminate", "harakiri"])
async def disconnect(self, ctx):
"""Clear the queue, stop playing music and disconnect from the channel"""
await self.stop(ctx, reply=False)
if ctx.voice_client is not None:
await ctx.voice_client.disconnect()
@commands.command(aliases=["next"])
async def skip(self, ctx):
"""Skip the song that is currently playing."""
if ctx.voice_client is not None and ctx.voice_client.is_playing():
ctx.voice_client.stop()
@commands.command()
async def loop(self, ctx):
"""Toggle the loop functionality"""
async with ctx.typing():
guild_data = self.guild_data[ctx.guild.id]
guild_data.loop = not guild_data.loop
await ctx.message.reply(f"Loop {'activated' if guild_data.loop else 'deactivated'}")
@commands.command(aliases=["die", "roll"])
async def dice(self, ctx, num: int = 1, sides: int = 20, show_sides: bool = True):
"""Roll an n sided dice"""
if sides < 1 or sides > 0x1337 or num < 1 or num > 40:
return await ctx.message.reply("You have been added to a list.")
if num == 1:
return await ctx.message.reply((f"[d{sides}] " if show_sides else "") +
f"You rolled a {secrets.randbelow(sides) + 1}")
rolls = [secrets.randbelow(sides) + 1 for _ in range(num)]
return await ctx.message.reply(
f"[{num}d{sides}] You rolled {'+'.join([str(r) for r in rolls])} = {sum(rolls)}")
# pylint: disable=C0103
@commands.command()
async def d4(self, ctx, n=1):
"""Roll a 4-sided dice"""
await self.dice(ctx, sides=4, num=n, show_sides=False)
@commands.command()
async def d6(self, ctx, n=1):
"""Roll a 6-sided dice"""
await self.dice(ctx, sides=6, num=n, show_sides=False)
@commands.command()
async def d8(self, ctx, n=1):
"""Roll a 8-sided dice"""
await self.dice(ctx, sides=8, num=n, show_sides=False)
@commands.command()
async def d10(self, ctx, n=1):
"""Roll a 10-sided dice"""
await self.dice(ctx, sides=10, num=n, show_sides=False)
@commands.command()
async def d12(self, ctx, n=1):
"""Roll a 10-sided dice"""
await self.dice(ctx, sides=12, num=n, show_sides=False)
@commands.command()
async def d20(self, ctx, n=1):
"""Roll a 20-sided dice"""
await self.dice(ctx, sides=20, num=n, show_sides=False)
@commands.command()
async def d100(self, ctx, n=1):
"""Roll a 100-sided dice"""
await self.dice(ctx, sides=100, num=n, show_sides=False)
@play.before_invoke
@cichero.before_invoke
@john.before_invoke
@bastardo.before_invoke
async def ensure_voice(self, ctx):
"""Pre-hook used to ensure you the bot is connected to a voice channel before starting to
play music."""
return await self.join(ctx)
def main():
"""Entrypoint for antipatibot program"""
logging.basicConfig(level=logging.INFO)
logging.getLogger("discord").setLevel(logging.WARNING)
log = logging.getLogger("antipatibot")
# log.setLevel(logging.DEBUG)
settings = BotSettings()
bot = commands.Bot(command_prefix=commands.when_mentioned_or(settings.command_prefix),
description="AntipatiBot")
log.sanitize = lambda message: str(message).replace(":", "_") \
.replace("\r", "\\r") \
.replace("\n", "\\n") \
.replace("\t", "\\t")
bot.add_cog(AntipatiBot(bot, log, settings))
try:
discord_api_file = "/antipatibot/discord_token.txt"
if os.path.exists(discord_api_file) and os.path.isfile(discord_api_file):
with open(discord_api_file, encoding='utf8') as file:
settings.discord_token = file.read().strip("\n\r\t ")
bot.run(settings.discord_token)
except discord.errors.LoginFailure:
log.error("invalid_discord_token:Please set a valid discord bot API token.")
if __name__ == "__main__":
main()
|
antipatico/antipatibot
|
antipatibot.py
|
antipatibot.py
|
py
| 14,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39672933944
|
import pytest
import common
@pytest.mark.parametrize(
"data,start,end",
[
("0-0", 0, 0),
("11-22", 11, 22),
],
)
def test_parse(data: str, start: int, end: int):
assert common.SectionRange.parse(data) == common.SectionRange(start, end)
@pytest.mark.parametrize(
"range1,range2,res",
[
("1-2", "3-4", False),
("1-2", "2-3", False),
("1-2", "2-2", True),
("1-3", "2-2", True),
("2-2", "2-2", True),
("2-3", "2-2", True),
("2-3", "1-2", False),
("2-3", "1-1", False),
],
)
def test_contains(range1: str, range2: str, res: bool):
sr1 = common.SectionRange.parse(range1)
sr2 = common.SectionRange.parse(range2)
assert sr1.contains(sr2) == res
@pytest.mark.parametrize(
"range1,range2,res",
[
("1-2", "3-4", False),
("1-2", "2-3", True),
("1-2", "2-2", True),
("1-3", "2-2", True),
("2-2", "2-2", True),
("2-3", "2-2", True),
("2-3", "1-2", True),
("2-3", "1-1", False),
],
)
def test_overlaps(range1: str, range2: str, res: bool):
sr1 = common.SectionRange.parse(range1)
sr2 = common.SectionRange.parse(range2)
assert sr1.overlaps(sr2) == res
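# Minimal sketch of the class under test, inferred from the cases above. The real
# implementation lives in common.py and may differ; this stand-in is documentation only.
from dataclasses import dataclass
@dataclass
class _SectionRangeSketch:
    start: int
    end: int
    @classmethod
    def parse(cls, text: str) -> "_SectionRangeSketch":
        start, end = text.split("-")
        return cls(int(start), int(end))
    def contains(self, other: "_SectionRangeSketch") -> bool:
        return self.start <= other.start and other.end <= self.end
    def overlaps(self, other: "_SectionRangeSketch") -> bool:
        return self.start <= other.end and other.start <= self.end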
|
cmatsuoka/aoc
|
2022 - expedition/04 - camp cleanup/test_common.py
|
test_common.py
|
py
| 1,252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26197481166
|
"""
Data import
https://github.com/tategallery/collection
"""
import json
import os
import sys
import time
import pandas as pd
csv_file = '/media/joji/DATA/workspace/data/artwork_data.csv'
selected_columns = ['id', 'artist', 'title', 'medium', 'year',
'acquisitionYear', 'height', 'width', 'units']
def main():
df = pd.read_csv(csv_file, index_col='id', usecols=selected_columns)
print(df)
print(f"Unique artists: {len(df['artist'].unique())}")
# print(len(set(df['artist'])))
tf_series = df['artist'] == 'Bacon, Francis'
print(tf_series.value_counts())
print(type(tf_series.value_counts()))
print(df['artist'].value_counts()['Bacon, Francis'])
if __name__ == '__main__':
t01 = time.time()
main()
t02 = time.time()
print(f'{sys.argv[0]} took {t02 - t01:.0f} seconds')
|
jojimpv/pandasdemo1
|
demo3.py
|
demo3.py
|
py
| 841 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12691211626
|
import os
from twilio.rest import Client
from urllib.request import urlopen
import re
import time
import smtplib
#need twilio credientials to run
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
twilio_number = os.environ["TWILIO_NUMBER"]
ubc_url = "https://courses.students.ubc.ca/cs/main?pname=subjarea&tname=subjareas&req=5&dept="
WAIT_TIME = 20
def sendMessage(message, phonenumber):
client = Client(account_sid, auth_token)
client.messages.create(
to=phonenumber,
from_=twilio_number,
body=message
)
def check_seats(url, user_info, regex_objects):
web_page_text = urlopen(url).read()
htmlText = web_page_text.decode("utf8")
general = re.search(regex_objects["general_seats"], htmlText)
restricted = re.search(regex_objects["restricted_seats"], htmlText)
temp_unavailable = htmlText.find("Note: this section is temp. unavailable")
print("Still looking...")
print("Restricted Seats: ", restricted.group(1))
print("General Seats: ", general.group(1))
if temp_unavailable != -1:
return 3
if not general or not restricted:
print("Something went wrong, maybe you put the wrong url in or lost internet connection, try restarting")
return 0
if general.group(1) != '0':
return 1
if restricted.group(1) != '0':
return 2
else:
return 0
def compile_regex():
regexs = {}
regexs["general_seats"] = re.compile("<td width='200px'>General Seats Remaining:</td><td align='left'><strong>(.*?)</strong></td>")
regexs["restricted_seats"] = re.compile("<td width='200px'>Restricted Seats Remaining\*:</td><td align='left'><strong>(.*?)</strong></td>")
return regexs
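# Worked example (illustrative only, never called by main): the compiled pattern pulls
# the seat count out of a fragment of the seat table; the HTML snippet below is made up
# but mirrors the markup the regexes above expect.
def _example_regex_check():
    sample = ("<td width='200px'>General Seats Remaining:</td>"
              "<td align='left'><strong>3</strong></td>")
    return compile_regex()["general_seats"].search(sample).group(1)  # -> '3'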
# gathers necessary user info
def gather_user_info():
user_info = {}
user_info['department'] = input("Enter department:")
user_info["course_number"] = input("Enter course number: ")
user_info["section"] = input("Enter section number: ")
user_info["phone_number"] = input("Enter phone number:(in format +xxxxxxxxxxx) ")
user_info["restricted"] = input("Are restricted seats okay?(yes/no)")
return user_info
def main():
user_info = gather_user_info()
print(user_info['department'])
    defined_url = ubc_url + user_info["department"] + "&course=" + user_info["course_number"] + "&section=" + user_info["section"]
regex_objects = compile_regex()
compile_regex()
while True:
status = check_seats(defined_url, user_info, regex_objects)
if status == 1:
print("GENERAL SEAT AVAILABLE SENDING MESSAGE")
sendMessage('There is a general seat available in ' + user_info["department"] + ' ' + user_info["course_number"] + '! Grab it here: ' + defined_url, user_info["phone_number"])
break
if status == 2:
if user_info["restricted"] == "yes":
print("RESTRICTED SEAT AVAILABLE")
sendMessage('There is a restricted seat available in ' + user_info["department"] + ' ' + user_info["course_number"] + '! Grab it here: ' + defined_url, user_info["phone_number"])
break
if status == 3:
print("The course is temporarily unavailable")
time.sleep(WAIT_TIME)
else:
time.sleep(WAIT_TIME)
if __name__ == "__main__":
main()
|
benkenj/UBCCourseNotifier
|
UBCCourseNotifierMain.py
|
UBCCourseNotifierMain.py
|
py
| 3,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71456631549
|
import boto3
from operator import itemgetter
ecr_client = boto3.client('ecr')
repositories = ecr_client.describe_repositories()['repositories']
if len(repositories) == 0:
print("Repository is empty!")
for repo in repositories:
print(f"Repository name: {repo['repositoryName']}")
query_repository_name = "java-maven-app"
images = ecr_client.describe_images(
repositoryName=query_repository_name
)
image_tags = []
for image in images['imageDetails']:
image_tags.append({
'tag': image['imageTags'],
'date': image['imagePushedAt']
})
sorted_images_based_on_date = sorted(image_tags, key=itemgetter("date"), reverse=True)
for image in sorted_images_based_on_date:
print(image)
|
ArshaShiri/DevOpsBootcampPythonAutomationAssignment
|
ecr_in_aws.py
|
ecr_in_aws.py
|
py
| 728 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6923620355
|
#encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import StringIO
import json
import logging
import random
import urllib
import urllib2
# functions
import responseHandler
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# global variables
from config import TOKEN
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
location = message.get('location')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
# location - weather
if location:
lat = location.get('latitude')
lon = location.get('longitude')
weather = responseHandler.locationInput(lat, lon)
responseHandler.sendWeather(chat_id, weather)
return
# command
if text:
if text.startswith('/'):
if text.lower() == '/start':
responseHandler.sendTextMessage(chat_id, '幸会。')
setEnabled(chat_id, True)
                # TODO: ideally make this a nightly scheduled job
elif text.lower() == '/weathertmr':
LAT = 57.63
LON = 18.31
weather = responseHandler.locationInput(LAT, LON)
responseHandler.forecastWeather(chat_id, weather)
elif text.lower() == '/stop':
responseHandler.sendTextMessage(chat_id, '好,下次再说。')
setEnabled(chat_id, False)
else:
responseHandler.sendTextMessage(chat_id, '什么?')
else:
responseHandler.replyMessage(chat_id, message_id, '嗯')
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
|
eglantine-shell/xiaoyiqingbot-2022
|
main.py
|
main.py
|
py
| 3,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16543818867
|
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import (
makeExpressionMakeTuple,
makeExpressionMakeTupleOrConstant,
)
from nuitka.nodes.DictionaryNodes import makeExpressionMakeDictOrConstant
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionRef,
makeExpressionFunctionCall,
makeExpressionFunctionCreation,
)
from nuitka.nodes.KeyValuePairNodes import makeExpressionPairs
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableAssignNodes import makeStatementAssignmentVariable
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from nuitka.PythonVersions import python_version
from .ComplexCallHelperFunctions import (
getFunctionCallHelperDictionaryUnpacking,
getFunctionCallHelperKeywordsStarDict,
getFunctionCallHelperKeywordsStarList,
getFunctionCallHelperKeywordsStarListStarDict,
getFunctionCallHelperPosKeywordsStarDict,
getFunctionCallHelperPosKeywordsStarList,
getFunctionCallHelperPosKeywordsStarListStarDict,
getFunctionCallHelperPosStarDict,
getFunctionCallHelperPosStarList,
getFunctionCallHelperPosStarListStarDict,
getFunctionCallHelperStarDict,
getFunctionCallHelperStarList,
getFunctionCallHelperStarListStarDict,
)
from .ReformulationDictionaryCreation import buildDictionaryUnpackingArgs
from .ReformulationSequenceCreation import buildListUnpacking
from .TreeHelpers import (
buildNode,
buildNodeTuple,
getKind,
makeStatementsSequenceFromStatements,
)
def buildCallNode(provider, node, source_ref):
called = buildNode(provider, node.func, source_ref)
if python_version >= 0x350:
list_star_arg = None
dict_star_arg = None
positional_args = []
# For Python3.5 compatibility, the error handling with star argument last
# is the old one, only with a starred argument before that, things use the
# new unpacking code.
for node_arg in node.args[:-1]:
if getKind(node_arg) == "Starred":
assert python_version >= 0x350
list_star_arg = buildListUnpacking(provider, node.args, source_ref)
positional_args = ()
break
else:
if node.args and getKind(node.args[-1]) == "Starred":
assert python_version >= 0x350
list_star_arg = buildNode(provider, node.args[-1].value, source_ref)
positional_args = buildNodeTuple(provider, node.args[:-1], source_ref)
else:
positional_args = buildNodeTuple(provider, node.args, source_ref)
# Only the values of keyword pairs have a real source ref, and those only
# really matter, so that makes sense.
keys = []
values = []
for keyword in node.keywords[:-1]:
if keyword.arg is None:
assert python_version >= 0x350
outline_body = ExpressionOutlineBody(
provider=provider, name="dict_unpacking_call", source_ref=source_ref
)
tmp_called = outline_body.allocateTempVariable(
temp_scope=None, name="called"
)
helper_args = [
ExpressionTempVariableRef(variable=tmp_called, source_ref=source_ref),
makeExpressionMakeTuple(
elements=buildDictionaryUnpackingArgs(
provider=provider,
keys=(keyword.arg for keyword in node.keywords),
values=(keyword.value for keyword in node.keywords),
source_ref=source_ref,
),
source_ref=source_ref,
),
]
dict_star_arg = makeExpressionFunctionCall(
function=makeExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getFunctionCallHelperDictionaryUnpacking(),
source_ref=source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=helper_args,
source_ref=source_ref,
)
outline_body.setChildBody(
makeStatementsSequenceFromStatements(
makeStatementAssignmentVariable(
variable=tmp_called, source=called, source_ref=source_ref
),
StatementReturn(
expression=_makeCallNode(
called=ExpressionTempVariableRef(
variable=tmp_called, source_ref=source_ref
),
positional_args=positional_args,
keys=keys,
values=values,
list_star_arg=list_star_arg,
dict_star_arg=dict_star_arg,
source_ref=source_ref,
),
source_ref=source_ref,
),
)
)
return outline_body
# For Python3.5 compatibility, the error handling with star argument last
# is the old one, only with a starred argument before that, things use the
# new unpacking code.
if node.keywords and node.keywords[-1].arg is None:
assert python_version >= 0x350
dict_star_arg = buildNode(provider, node.keywords[-1].value, source_ref)
keywords = node.keywords[:-1]
else:
keywords = node.keywords
for keyword in keywords:
keys.append(
makeConstantRefNode(
constant=keyword.arg, source_ref=source_ref, user_provided=True
)
)
values.append(buildNode(provider, keyword.value, source_ref))
if python_version < 0x350:
list_star_arg = buildNode(provider, node.starargs, source_ref, True)
dict_star_arg = buildNode(provider, node.kwargs, source_ref, True)
return _makeCallNode(
called=called,
positional_args=positional_args,
keys=keys,
values=values,
list_star_arg=list_star_arg,
dict_star_arg=dict_star_arg,
source_ref=source_ref,
)
def _makeCallNode(
called, positional_args, keys, values, list_star_arg, dict_star_arg, source_ref
):
# Many variables, but only to cover the many complex call cases.
if list_star_arg is None and dict_star_arg is None:
result = makeExpressionCall(
called=called,
args=makeExpressionMakeTupleOrConstant(
elements=positional_args,
user_provided=True,
source_ref=source_ref,
),
kw=makeExpressionMakeDictOrConstant(
makeExpressionPairs(keys=keys, values=values),
user_provided=True,
source_ref=source_ref,
),
source_ref=source_ref,
)
# Bug compatible line numbers before Python 3.8
if python_version < 0x380:
if values:
result.setCompatibleSourceReference(
source_ref=values[-1].getCompatibleSourceReference()
)
elif positional_args:
result.setCompatibleSourceReference(
source_ref=positional_args[-1].getCompatibleSourceReference()
)
return result
else:
# Dispatch to complex helper function for each case. These do
# re-formulation of complex calls according to Developer Manual.
key = (
bool(positional_args),
bool(keys),
list_star_arg is not None,
dict_star_arg is not None,
)
table = {
(True, True, True, False): getFunctionCallHelperPosKeywordsStarList,
(True, False, True, False): getFunctionCallHelperPosStarList,
(False, True, True, False): getFunctionCallHelperKeywordsStarList,
(False, False, True, False): getFunctionCallHelperStarList,
(True, True, False, True): getFunctionCallHelperPosKeywordsStarDict,
(True, False, False, True): getFunctionCallHelperPosStarDict,
(False, True, False, True): getFunctionCallHelperKeywordsStarDict,
(False, False, False, True): getFunctionCallHelperStarDict,
(True, True, True, True): getFunctionCallHelperPosKeywordsStarListStarDict,
(True, False, True, True): getFunctionCallHelperPosStarListStarDict,
(False, True, True, True): getFunctionCallHelperKeywordsStarListStarDict,
(False, False, True, True): getFunctionCallHelperStarListStarDict,
}
get_helper = table[key]
helper_args = [called]
if positional_args:
helper_args.append(
makeExpressionMakeTupleOrConstant(
elements=positional_args,
user_provided=True,
source_ref=source_ref,
)
)
# Order of evaluation changed in Python3.5.
if python_version >= 0x350 and list_star_arg is not None:
helper_args.append(list_star_arg)
if keys:
helper_args.append(
makeExpressionMakeDictOrConstant(
pairs=makeExpressionPairs(keys=keys, values=values),
user_provided=True,
source_ref=source_ref,
)
)
# Order of evaluation changed in Python3.5.
if python_version < 0x350 and list_star_arg is not None:
helper_args.append(list_star_arg)
if dict_star_arg is not None:
helper_args.append(dict_star_arg)
result = makeExpressionFunctionCall(
function=makeExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=get_helper(), source_ref=source_ref
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=helper_args,
source_ref=source_ref,
)
# Bug compatible line numbers before Python 3.8
if python_version < 0x380:
result.setCompatibleSourceReference(
source_ref=helper_args[-1].getCompatibleSourceReference()
)
return result
|
Nuitka/Nuitka
|
nuitka/tree/ReformulationCallExpressions.py
|
ReformulationCallExpressions.py
|
py
| 10,742 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
10747823823
|
'''S3 uploader module'''
import os
import time
import signal
import sys
import boto3
# This module seems to have some issues. pylint ignore them
from setproctitle import setproctitle, getproctitle # pylint: disable=E0611
from kafkatos3.ThreadPool import ThreadPool
def upload_file(self, filename):
'''horrible callback function outside the class because it needs to be pickable'''
self.upload_file_to_s3(filename)
class S3Uploader(object):
'''class for uploading files to s3'''
def __init__(self, config, logger):
'''constructor'''
self.config = config
self.logger = logger
self.pool = None
def upload_file_to_s3(self, filename):
'''upload file to s3'''
self.logger.info("Uploading file: " + filename + " to s3")
working_dir = self.config.get("main", "working_directory")
s3_key = "kafkatos3" + filename.replace(working_dir + "/tos3", "")
self.logger.info("S3 key is " + s3_key)
if self.config.get("s3", "s3_access_key") != "":
access_key = self.config.get("s3", "s3_access_key")
secret_key = self.config.get("s3", "s3_secret_key")
s3client = boto3.client("s3", aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
s3client = boto3.client("s3")
bucket = self.config.get("s3", "s3_bucket_name")
s3client.upload_file(filename, bucket, s3_key)
os.remove(filename)
def run(self):
'''main executor'''
def cb_exit_gracefully(signum, frame):
'''callback to exit gracefully for a pool thread'''
self.logger.info("Shutting down S3Uploader, signum %d"% (signum))
sys.exit(0)
self.logger.info("S3Uploader process starting up")
self.pool = ThreadPool(int(self.config.get("s3", "s3uploader_workers")))
setproctitle("[s3upload] " + getproctitle())
signal.signal(signal.SIGINT, cb_exit_gracefully)
signal.signal(signal.SIGTERM, cb_exit_gracefully)
while True:
tos3_dir = os.path.join(self.config.get(
"main", "working_directory"), "tos3")
files = self.get_files(tos3_dir, ".gz")
self.pool.map(self.upload_file_to_s3, files)
time.sleep(float(self.config.get(
"s3", "s3upload_check_interval")))
sys.exit(0)
def get_files(self, directory, extension):
''' return a list of files in a directory recusively based on extension'''
file_list = []
for dirpath, _, files in os.walk(directory):
for filename in files:
fname = os.path.join(dirpath, filename)
filename, file_extension = os.path.splitext(fname)
if file_extension == extension:
file_list.append(fname)
return file_list
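# Illustrative driver sketch (not part of kafkatos3): S3Uploader only needs a ConfigParser-
# style object exposing the sections/keys read above. The ini filename here is hypothetical
# and Python 3's configparser module is assumed.
def _example_run(config_file="kafkatos3.cfg"):
    import configparser
    import logging
    config = configparser.ConfigParser()
    config.read(config_file)
    S3Uploader(config, logging.getLogger("kafkatos3")).run()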
|
snowch/kafkatos3
|
kafkatos3/S3Uploader.py
|
S3Uploader.py
|
py
| 2,919 |
python
|
en
|
code
| null |
github-code
|
6
|
40128810884
|
#!/usr/bin/env python3
import itertools
from collections import defaultdict
from heapq import heappush, heappop
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.buffer.readline
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
def debug(*x):
print(*x, file=sys.stderr)
def blute_solve(N, AS):
"void()"
buf = []
def blute(xs, buf):
debug("blute: xs, buf", xs, buf)
if not xs:
return 0
if not buf:
# first player score 0
return blute(xs[1:], [xs[0]])
# insert
candidate = []
for i in range(len(buf)):
s = min(buf[i - 1], buf[i])
newBuf = buf[:]
newBuf.insert(0, xs[0])
candidate.append(blute(xs[1:], newBuf) + s)
return max(candidate)
candidate = []
for xs in itertools.permutations(range(N)):
candidate.append(blute(xs, buf))
return max(candidate)
def solve(N, AS):
    # Greedy: seat people in descending order of friendliness. The first arrival
    # scores 0 and the second scores AS[0]; after that each value AS[k] can be the
    # smaller neighbour of at most two later arrivals, hence the AS[1 + i // 2] term.
    AS.sort(reverse=True)
    ret = AS[0]
    for i in range(N - 2):
        ret += AS[1 + i // 2]
    return ret
def main():
N = int(input())
AS = list(map(int, input().split()))
print(solve(N, AS))
T1 = """
4
2 2 1 3
"""
def test_T1():
"""
>>> as_input(T1)
>>> main()
7
"""
T0 = """
3
3 2 1
"""
def test_T0():
"""
>>> as_input(T0)
>>> main()
5
"""
T2 = """
7
1 1 1 1 1 1 1
"""
def test_T2():
"""
>>> as_input(T2)
>>> main()
6
"""
def _test():
import doctest
doctest.testmod()
def as_input(s):
"use in test, use given string as input file"
import io
global read, input
f = io.StringIO(s.strip())
def input():
return bytes(f.readline(), "ascii")
def read():
return bytes(f.read(), "ascii")
USE_NUMBA = False
if (USE_NUMBA and sys.argv[-1] == 'ONLINE_JUDGE') or sys.argv[-1] == '-c':
print("compiling")
from numba.pycc import CC
cc = CC('my_module')
cc.export('solve', solve.__doc__.strip().split()[0])(solve)
cc.compile()
exit()
else:
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if (USE_NUMBA and sys.argv[-1] != '-p') or sys.argv[-1] == "--numba":
# -p: pure python mode
# if not -p, import compiled module
from my_module import solve # pylint: disable=all
elif sys.argv[-1] == "-t":
_test()
sys.exit()
elif sys.argv[-1] != '-p' and len(sys.argv) == 2:
# input given as file
input_as_file = open(sys.argv[1])
input = input_as_file.buffer.readline
read = input_as_file.buffer.read
main()
|
nishio/atcoder
|
abc173/d.py
|
d.py
|
py
| 2,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31574941802
|
from PIL import Image
import sys
import argparse
from os.path import exists
import time
import math
import re
import tkinter as tk
from tkinter import filedialog, ttk
import threading
import population
import province
import vicmap
mod_dir_loc = ""
save_file_loc = ""
map_type = ""
global_population = 0
mean_savings = 0
sd_savings = 0
all_pops = []
test_map = None
progress = None
gui_mode = True
verbose = False
out_file_loc = "map_out.png"
def vprint(s):
if verbose:
print(s)
if gui_mode:
pass # Do something to the progress bar idk
# Loads a game file from the mod directory, or if missing, from the game directory.
def get_game_file_loc(location):
if exists(mod_dir_loc + location):
return mod_dir_loc + location
elif exists(game_dir + location):
return game_dir + location
else:
sys.exit("File not found " + location)
def split_dec(line):
sides = line.split("=")
return (sides[0].strip(), sides[1].strip())
def open_save(location):
return open(location, "r", encoding='iso-8859-1')
def read_save(save_file):
i = 0
current_prov = None
for line in save_file:
i = i + 1
if bool(re.search("^\d+=$", line)):
current_prov = int(split_dec(line)[0])
save_file.__next__()
if not bool(re.search("\tname", save_file.__next__())):
continue
save_file.__next__()
line = save_file.__next__()
if re.search("^}$", line.strip()):
province.id_dict[current_prov].is_water = True
elif bool(re.search(population.pop_regex, line.strip())):
population.POP(save_file, current_prov, split_dec(line)[0])
elif bool(re.search("battle=", line)):
province.make_battle(save_file)
vprint(f"Lines parsed: {i}")
def load_UI():
global progress
window = tk.Tk()
window.title("Victoria 2 Mapper")
save_file_entry = tk.Entry(width=100)
mod_dir = tk.Entry(width=100)
def set_mod_dir():
global mod_dir_loc
mod_dir_loc = tk.filedialog.askdirectory()
mod_dir.insert(0, mod_dir_loc)
ld_mod = tk.Button(text="Choose Mod", command=set_mod_dir)
def set_save_file():
global save_file_loc
save_file_loc = tk.filedialog.askopenfilename()
save_file_entry.insert(0, save_file_loc)
ld_save = tk.Button(text="Choose Save", command=set_save_file)
map_type_entry = tk.Entry(width = 100)
def make_map_ui():
make_map(map_type_entry.get())
    # Spawn a fresh worker thread per click so the Tk mainloop stays responsive
    # and the button stays usable after the first run (a Thread object is single-use).
    make_button = tk.Button(text="Make Map", command=lambda: threading.Thread(target=make_map_ui).start())
progress = tk.ttk.Progressbar()
tk.Label(text="Save File:").grid(row = 0, column = 0, padx=3, pady=3)
save_file_entry.grid(row = 0, column = 1, padx=3, pady=3)
ld_save.grid(row = 0, column = 2, padx=3, pady=3)
tk.Label(text="Mod Directory:").grid(row = 1, column = 0, padx=3, pady=3)
mod_dir.grid(row = 1, column = 1, padx=3, pady=3)
ld_mod.grid(row = 1, column = 2, padx=3, pady=3)
tk.Label(text="Parameters:").grid(row = 2, column = 0, padx=3, pady=3)
map_type_entry.grid(row = 2, column = 1, padx=3, pady=3)
make_button.grid(row = 3, column = 1, padx=3, pady=3)
progress.grid(row = 4, column = 1, padx=3, pady=3)
window.mainloop()
# Map Function #
def draw_map(map_func):
global test_map
# Some poorly made maps have invalid colors, this uses the previous color as a backup.
prev_color = None # Previous color used on the province map
prev_draw = None
for x in range(vicmap.MAP_W):
for y in range(vicmap.MAP_H):
this_color = vicmap.pixel_map[x, y]
if (this_color == (0, 0, 0)):
this_color = prev_color
this_prov = province.color_dict[this_color]
test_map[x, vicmap.MAP_H - y - 1] = map_func(this_prov, x, y)
prev_color = this_color
def pop_attr_map(attr):
attr_dict = {
"religion" : population.religions,
"culture" : population.cultures,
"kind" : population.pop_types
}
attr_list = attr_dict[attr]
def out_func(this_prov, x, y):
out_colors = ((0, 0, 0),)
if this_prov.is_water:
return (255, 255, 255)
rel_tuple = this_prov.most_populous(attr)
out_colors = (attr_list[rel_tuple[0]], attr_list[rel_tuple[-1]])
if len(out_colors) > 1 and (x + y) % 5 == 0:
return out_colors[1]
else:
return out_colors[0]
return out_func
def pop_attr_heatmap(attr, kind):
most = province.get_most(attr, kind)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255 * (this_prov.get_amnt(attr, kind)/most)), 0, 0)
return out_func
def pop_attr_percent_map(attr, kind):
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.get_amnt(attr, kind)/this_prov.total_pop)), 0, 0)
return out_func
def pop_average_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.avg_savings = sum([pop.money for pop in prov.POPs]) / prov.total_pop
most = prov.avg_savings if prov.avg_savings > most else most
print(most)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.avg_savings/most)), 0, 0)
return out_func
def pop_magnitude_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.mag_savings = ((sum([pop.money for pop in prov.POPs]) / prov.total_pop) - mean_savings) / sd_savings
most = prov.mag_savings if prov.mag_savings > most else most
print(most)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
if this_prov.mag_savings < 0:
return (255, 0, 0)
return (0, 255, 0)
return out_func
def pop_total_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.total_savings = sum([pop.money for pop in prov.POPs])
most = prov.total_savings if prov.total_savings > most else most
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.total_savings/most)), 0, 0)
return out_func
def population_heatmap():
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255*(this_prov.total_pop/province.largest_prov_pop)), 0, 0)
return out_func
def battle_death_map():
most = max(province.provinces, key=lambda prov: prov.battle_deaths).battle_deaths
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255*(this_prov.battle_deaths/most)), 0, 0)
return out_func
def make_map(params):
global global_population, mean_savings, sd_savings, all_pops, test_map, progress, save_file_loc, mod_dir_loc
    # Interpret what kind of map the user wants.
map_types = {
"population" : (population_heatmap, 0),
"total_savings" : (pop_total_savings, 0),
"average_savings" : (pop_average_savings, 0),
"magnitude_savings" : (pop_magnitude_savings, 0),
"attr_percent" : (pop_attr_percent_map, 2),
"attr_heatmap" : (pop_attr_heatmap, 2),
"attr" : (pop_attr_map, 1),
"battle_deaths" : (battle_death_map, 0)
}
params = params.split(' ')
map_type_func = map_types[params[0]][0]
map_type_param_amnt = map_types[params[0]][1]
map_type_func_params = None
if map_type_param_amnt == 0:
map_type_func_params = ()
else:
map_type_func_params = tuple(params[1:1+map_type_param_amnt])
population.make_pop_regex()
vprint("Loading Files...")
vicmap.load_map(get_game_file_loc("/map/provinces.bmp"))
province.load_provinces(get_game_file_loc("/map/definition.csv"))
population.load_culture(get_game_file_loc("/common/cultures.txt"))
vprint("Reading Save...")
save_file = open_save(save_file_loc)
read_save(save_file)
vprint("Doing Stats...")
for prov in province.provinces:
prov.get_population()
for prov in province.provinces:
all_pops += prov.POPs
global_population = sum([prov.total_pop for prov in province.provinces])
mean_savings += sum([pop.money for pop in all_pops]) / global_population
sd_savings = math.sqrt(sum([((pop.money / pop.size - mean_savings)**2) * pop.size for pop in all_pops]) / global_population)
img = Image.new('RGB', (vicmap.MAP_W, vicmap.MAP_H), "BLACK")
test_map = img.load()
vprint("Drawing Map...")
draw_map(map_type_func(*map_type_func_params))
img.save(out_file_loc)
#img.show()
def print_license():
license = """
MIT License
Copyright (c) 2020 neopythagorean
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
print (license)
def command_line():
    global mod_dir_loc, game_dir, save_file_loc, out_file_loc, verbose
parser = argparse.ArgumentParser(description='Mapping tool for Victoria 2.')
parser.add_argument('desc', type=str, nargs='?', help='map description string')
parser.add_argument('-o', type=str, nargs='?', default='map_out.png', help='out file')
parser.add_argument('-s', type=str, nargs=1, help='save file')
parser.add_argument('-m', type=str, nargs=1, help='mod directory')
parser.add_argument('-g', type=str, nargs=1, help='game directory')
parser.add_argument('--verbose', action='store_true', help='print debug info')
parser.add_argument('--gui', action='store_true', help='force GUI')
parser.add_argument('--license', action='store_true', help='show license information')
p_args = parser.parse_args(sys.argv[1:])
verbose = p_args.verbose
vprint("--VICTORIA 2 MAPPER--")
if p_args.gui:
# Force GUI
load_UI()
return
if p_args.license:
print_license()
return
mod_dir_loc = p_args.m[0]
game_dir = p_args.g[0]
save_file_loc = p_args.s[0]
out_file_loc = p_args.o
start = time.perf_counter()
make_map(p_args.desc)
elapsed = time.perf_counter() - start
vprint(f"Done in {elapsed:.3f}s")
def main():
if len(sys.argv) == 1:
# No Arguments -- load GUI
load_UI()
else:
command_line()
main()
|
neopythagorean/vic2mapper
|
src/mapper.py
|
mapper.py
|
py
| 12,604 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27214693785
|
"""
Overview:
Functions to deal with encoding binary data easily.
"""
import sys
from typing import Optional, List
import chardet
from ..collection import unique
_DEFAULT_ENCODING = 'utf-8'
_DEFAULT_PREFERRED_ENCODINGS = ['utf-8', 'gbk', 'gb2312', 'gb18030', 'big5'] # common encodings for chinese
__all__ = [
'auto_decode'
]
def _decode(data: bytes, encoding: str) -> str:
return data.decode(encoding)
def auto_decode(data: bytes, encoding: Optional[str] = None, prefers: Optional[List[str]] = None) -> str:
r"""
Overview:
Auto decode binary data to string, the encoding mode will be automatically detected.
Arguments:
- data (:obj:`bytes`): Original binary data to be decoded.
- encoding (:obj:`Optional[str]`): Encoding mode to be used, default is ``None`` which \
means this function need to automatically detect the encoding.
        - prefers (:obj:`Optional[List[str]]`): Preferred encodings.
Returns:
- str (:obj:`str`): Decoded string.
Examples::
>>> auto_decode(b'kdsfjldsjflkdsmgds') # 'kdsfjldsjflkdsmgds'
>>> auto_decode(b'\xd0\x94\xd0\xbe\xd0\xb1\xd1\x80\xd1\x8b\xd0\xb9 \xd0'
... b'\xb2\xd0\xb5\xd1\x87\xd0\xb5\xd1\x80') # "Добрый вечер"
>>> auto_decode(b'\xa4\xb3\xa4\xf3\xa4\xd0\xa4\xf3\xa4\xcf') # "こんばんは"
>>> auto_decode(b'\xcd\xed\xc9\xcf\xba\xc3') # "晚上好"
"""
if encoding:
return _decode(data, encoding)
else:
if prefers is None:
prefers = _DEFAULT_PREFERRED_ENCODINGS
_elist = filter(bool, unique([
*prefers,
sys.getdefaultencoding(),
chardet.detect(data)['encoding']
]))
last_err = None
for enc in _elist:
try:
return _decode(data, enc)
except UnicodeDecodeError as err:
if last_err is None or err.start > last_err.start:
last_err = err
raise last_err
|
HansBug/hbutils
|
hbutils/encoding/decode.py
|
decode.py
|
py
| 2,040 |
python
|
en
|
code
| 7 |
github-code
|
6
|
15792066200
|
import argparse
from os import listdir, makedirs
from os.path import isfile, join, basename, dirname, isdir
from PIL import Image
from tqdm import tqdm
# folder_path = 'photos'
# left, top, right, bottom = 559, 225, 1361, 0
# -d ./photos -s ./photos2 -c -a 559 225 1361 0
def build_argparse():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help = 'perform the action on all photos in a directory', type = str,
default = False)
parser.add_argument('-f', '--file', help = 'perform the action on one photo', type = str, default = False)
parser.add_argument('-n', '--name', help = 'new file name', type = str, default = '')
parser.add_argument('-s', '--save', help = 'destination directory to save the new photos', type = str,
default = False)
parser.add_argument('-c', '--crop', help = 'crop the image(s) in a rectangle', action = 'store_true')
parser.add_argument('-a', '--area', help = 'define the rectangle to crop, the order of sequence is: left, top, '
'right, bottom', type = int, nargs = 4, default = [0, 0, 0, 0])
parser.add_argument('-l', '--left', help = 'the left pixel from to crop', type = int, default = 0)
parser.add_argument('-t', '--top', help = 'the top pixel from to crop', type = int, default = 0)
parser.add_argument('-r', '--right', help = 'the right pixel from to crop', type = int, default = 0)
parser.add_argument('-b', '--bottom', help = 'the bottom pixel from to crop', type = int, default = 0)
args = parser.parse_args()
return args
def photo_crop(image_path, save_to_path, left = 0, top = 0, right = 0, bottom = 0, number = 0, new_file_name = ''):
with Image.open(image_path) as image:
image_colors = image.load()
if left == 0 and right == 0 and top == 0 and bottom == 0:
right, bottom = image.size
if bottom == 0:
for y in range(400, image.size[1]):
if image_colors[1358, y] == (181, 181, 181, 255):
bottom = y
bottom += 2
cropped_image = image.crop((left, top, right, bottom))
        if new_file_name == '':
            cropped_image.save(join(save_to_path, basename(image_path)))
        else:
            # Append a two-digit counter only when a positive number was given
            # (an empty string cannot go through the ':02d' format spec).
            stem = new_file_name.split('.')[0]
            suffix = '{:02d}'.format(number) if number > 0 else ''
            if '.' in new_file_name:
                ext = basename(new_file_name).split('.')[1]
            else:
                ext = basename(image_path).split('.')[1]
            cropped_image.save(join(save_to_path, '{}{}.{}'.format(stem, suffix, ext)))
def all_files_in_folder(folder_path):
return [join(folder_path, f) for f in listdir(folder_path) if isfile(join(folder_path, f))]
def main():
args = build_argparse()
if args.crop:
if args.area != [0, 0, 0, 0]:
left, top, right, bottom = args.area
else:
left = args.left
top = args.top
right = args.right
bottom = args.bottom
if args.directory:
if args.save:
path_to_save = args.save
if not isdir(path_to_save):
makedirs(path_to_save)
else:
path_to_save = args.directory
for i, image_path in enumerate(tqdm(all_files_in_folder(args.directory))):
photo_crop(image_path, path_to_save, left, top, right, bottom, i+1, args.name)
print("The operations are completed check the {} folder for the photos.".format(path_to_save))
elif args.file:
if args.save:
path_to_save = args.save
if not isdir(path_to_save):
makedirs(path_to_save)
else:
path_to_save = dirname(args.file)
photo_crop(args.file, path_to_save, left, top, right, bottom, 0, args.name)
print("The operation is completed check the {} folder for the photo.".format(path_to_save))
main()
|
zsoman/photo_editing
|
PhotoCropper.py
|
PhotoCropper.py
|
py
| 4,114 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38194485687
|
# -*- coding: utf-8 -*-
"""
env:python3
author:Jiashuai Liu
choose among 3 alternatives using 5 criteria (AHP)
"""
import numpy as np
# Random consistency index
RI_dict = {1: 0, 2: 0, 3: 0.58, 4: 0.90, 5: 1.12, 6: 1.24, 7: 1.32, 8: 1.41, 9: 1.45}
# The weights are computed with the column-normalization (sum) method
def get_w(array):
row = array.shape[0] # Calculate the order
a_axis_0_sum = array.sum(axis=0)
# print(a_axis_0_sum)
New = array / a_axis_0_sum
# print(b)
    b_axis_0_sum = New.sum(axis=0)  # column sums, not used later
    b_axis_1_sum = New.sum(axis=1)  # row sums give the unnormalized priority (eigen)vector
# print(b_axis_1_sum)
w = b_axis_1_sum / row # normalization(eigenvectors)
AW = (w * array).sum(axis=1)
# print(AW)
max_max = sum(AW / (row * w)) # Maximum eigenvalue
# print(max_max)
CI = (max_max - row) / (row - 1)
CR = CI / RI_dict[row]
if CR < 0.1:
# print(round(CR, 3))
# print('meet the consistency')
# print(np.max(w))
# print(sorted(w,reverse=True))
# print('the Maximum eigenvalue is ',max_max)
# print('eigenvector is :%s' % w)
return w
else:
print(round(CR, 3)) # Rounding, followed by decimal places
print('Do not meet the consistency, please modify')
def main(array):
if type(array) is np.ndarray:
return get_w(array)
else:
print('Please enter numpy object')
# define the first layer, the shape is 5*5
if __name__ == '__main__':
e = np.array([[1, 2, 7, 5, 5], [1 / 2, 1, 4, 3, 3], [1 / 7, 1 / 4, 1, 1 / 2, 1 / 3], [1 / 5, 1 / 3, 2, 1, 1],
[1 / 5, 1 / 3, 3, 1, 1]])
print("the shape of e is", e.shape)
    # the following 5 matrices define the second layer; each has shape 3*3
a = np.array([[1, 1 / 3, 1 / 8], [3, 1, 1 / 3], [8, 3, 1]])
b = np.array([[1, 2, 5], [1 / 2, 1, 2], [1 / 5, 1 / 2, 1]])
c = np.array([[1, 1, 3], [1, 1, 3], [1 / 3, 1 / 3, 1]])
d = np.array([[1, 3, 4], [1 / 3, 1, 1], [1 / 4, 1, 1]])
f = np.array([[1, 4, 1 / 2], [1 / 4, 1, 1 / 4], [2, 4, 1]])
e = main(e)
a = main(a)
b = main(b)
c = main(c)
d = main(d)
f = main(f)
try:
res = np.array([a, b, c, d, f])
# print(res)
ret = (np.transpose(res) * e).sum(axis=1)
print(ret)
except TypeError:
print('Data error, may not meet the consistency, please modify')
|
Liujiashuai/mathematical-modeling
|
AHP.py
|
AHP.py
|
py
| 2,504 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19886899760
|
import os
import pytest
from pendulum import datetime
from pathlib import Path
from hypothesis_trio.stateful import (
initialize,
rule,
run_state_machine_as_test,
TrioAsyncioRuleBasedStateMachine,
)
from hypothesis import strategies as st
from guardata.client.types import EntryID, LocalFileManifest, Chunk
from guardata.client.fs.storage import WorkspaceStorage
from guardata.client.fs.workspacefs.file_transactions import FSInvalidFileDescriptor
from guardata.client.fs.exceptions import FSRemoteBlockNotFound
from tests.common import freeze_time, call_with_control
class File:
def __init__(self, local_storage, manifest):
self.fresh_manifest = manifest
self.entry_id = manifest.id
self.local_storage = local_storage
def ensure_manifest(self, **kwargs):
manifest = self.local_storage.manifest_storage._cache[self.entry_id]
for k, v in kwargs.items():
assert getattr(manifest, k) == v
def is_cache_ahead_of_persistance(self):
return self.entry_id in self.local_storage.manifest_storage._cache_ahead_of_localdb
async def get_manifest(self):
return await self.local_storage.get_manifest(self.entry_id)
async def set_manifest(self, manifest):
async with self.local_storage.lock_manifest(self.entry_id):
await self.local_storage.set_manifest(self.entry_id, manifest)
def open(self):
return self.local_storage.create_file_descriptor(self.fresh_manifest)
@pytest.fixture
async def foo_txt(alice, alice_file_transactions):
local_storage = alice_file_transactions.local_storage
now = datetime(2000, 1, 2)
placeholder = LocalFileManifest.new_placeholder(alice.device_id, parent=EntryID(), now=now)
remote_v1 = placeholder.to_remote(author=alice.device_id, timestamp=now)
manifest = LocalFileManifest.from_remote(remote_v1)
async with local_storage.lock_entry_id(manifest.id):
await local_storage.set_manifest(manifest.id, manifest)
return File(local_storage, manifest)
@pytest.mark.trio
async def test_close_unknown_fd(alice_file_transactions):
with pytest.raises(FSInvalidFileDescriptor):
await alice_file_transactions.fd_close(42)
@pytest.mark.trio
async def test_operations_on_file(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
fd = foo_txt.open()
assert isinstance(fd, int)
with freeze_time("2000-01-03"):
await file_transactions.fd_write(fd, b"hello ", 0)
await file_transactions.fd_write(fd, b"world !", -1)
await file_transactions.fd_write(fd, b"H", 0)
await file_transactions.fd_write(fd, b"", 0)
assert foo_txt.is_cache_ahead_of_persistance()
fd2 = foo_txt.open()
await file_transactions.fd_write(fd2, b"!!!", -1)
data = await file_transactions.fd_read(fd2, 1, 0)
assert data == b"H"
await file_transactions.fd_close(fd2)
foo_txt.ensure_manifest(
size=16,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
data = await file_transactions.fd_read(fd, 5, 6)
assert data == b"world"
await file_transactions.fd_close(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
fd2 = foo_txt.open()
data = await file_transactions.fd_read(fd2, -1, 0)
assert data == b"Hello world !!!!"
await file_transactions.fd_close(fd2)
assert not foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=16,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
@pytest.mark.trio
async def test_flush_file(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
fd = foo_txt.open()
foo_txt.ensure_manifest(
size=0,
is_placeholder=False,
need_sync=False,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 2),
)
with freeze_time("2000-01-03"):
await file_transactions.fd_write(fd, b"hello ", 0)
await file_transactions.fd_write(fd, b"world !", -1)
assert foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=13,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
await file_transactions.fd_flush(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
await file_transactions.fd_close(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=13,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
@pytest.mark.trio
async def test_block_not_loaded_entry(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
foo_manifest = await foo_txt.get_manifest()
chunk1_data = b"a" * 10
chunk2_data = b"b" * 5
chunk1 = Chunk.new(0, 10).evolve_as_block(chunk1_data)
chunk2 = Chunk.new(10, 15).evolve_as_block(chunk2_data)
foo_manifest = foo_manifest.evolve(blocks=((chunk1, chunk2),), size=15)
async with file_transactions.local_storage.lock_entry_id(foo_manifest.parent):
await foo_txt.set_manifest(foo_manifest)
fd = foo_txt.open()
with pytest.raises(FSRemoteBlockNotFound):
await file_transactions.fd_read(fd, 14, 0)
await file_transactions.local_storage.set_chunk(chunk1.id, chunk1_data)
await file_transactions.local_storage.set_chunk(chunk2.id, chunk2_data)
data = await file_transactions.fd_read(fd, 14, 0)
assert data == chunk1_data + chunk2_data[:4]
@pytest.mark.trio
async def test_load_block_from_remote(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
# Prepare the backend
workspace_id = file_transactions.remote_loader.workspace_id
await file_transactions.remote_loader.create_realm(workspace_id)
foo_manifest = await foo_txt.get_manifest()
chunk1_data = b"a" * 10
chunk2_data = b"b" * 5
chunk1 = Chunk.new(0, 10).evolve_as_block(chunk1_data)
chunk2 = Chunk.new(10, 15).evolve_as_block(chunk2_data)
foo_manifest = foo_manifest.evolve(blocks=((chunk1, chunk2),), size=15)
await foo_txt.set_manifest(foo_manifest)
fd = foo_txt.open()
await file_transactions.remote_loader.upload_block(chunk1.access, chunk1_data)
await file_transactions.remote_loader.upload_block(chunk2.access, chunk2_data)
await file_transactions.local_storage.clear_clean_block(chunk1.access.id)
await file_transactions.local_storage.clear_clean_block(chunk2.access.id)
data = await file_transactions.fd_read(fd, 14, 0)
assert data == chunk1_data + chunk2_data[:4]
size = st.integers(min_value=0, max_value=4 * 1024 ** 2) # Between 0 and 4MB
@pytest.mark.slow
@pytest.mark.skipif(os.name == "nt", reason="Windows file style not compatible with oracle")
def test_file_operations(
tmpdir, hypothesis_settings, reset_testbed, file_transactions_factory, alice, alice_backend_cmds
):
tentative = 0
class FileOperationsStateMachine(TrioAsyncioRuleBasedStateMachine):
async def start_transactions(self):
async def _transactions_controlled_cb(started_cb):
async with WorkspaceStorage.run(alice, Path("/dummy"), EntryID()) as local_storage:
file_transactions = await file_transactions_factory(
self.device, alice_backend_cmds, local_storage=local_storage
)
await started_cb(file_transactions=file_transactions)
self.transactions_controller = await self.get_root_nursery().start(
call_with_control, _transactions_controlled_cb
)
@initialize()
async def init(self):
nonlocal tentative
tentative += 1
await reset_testbed()
self.device = alice
await self.start_transactions()
self.file_transactions = self.transactions_controller.file_transactions
self.local_storage = self.file_transactions.local_storage
self.fresh_manifest = LocalFileManifest.new_placeholder(
alice.device_id, parent=EntryID()
)
self.entry_id = self.fresh_manifest.id
async with self.local_storage.lock_entry_id(self.entry_id):
await self.local_storage.set_manifest(self.entry_id, self.fresh_manifest)
self.fd = self.local_storage.create_file_descriptor(self.fresh_manifest)
self.file_oracle_path = tmpdir / f"oracle-test-{tentative}.txt"
self.file_oracle_fd = os.open(self.file_oracle_path, os.O_RDWR | os.O_CREAT)
async def teardown(self):
if not hasattr(self, "fd"):
return
await self.file_transactions.fd_close(self.fd)
os.close(self.file_oracle_fd)
@rule(size=size, offset=size)
async def read(self, size, offset):
data = await self.file_transactions.fd_read(self.fd, size, offset)
os.lseek(self.file_oracle_fd, offset, os.SEEK_SET)
expected = os.read(self.file_oracle_fd, size)
assert data == expected
@rule(content=st.binary(), offset=size)
async def write(self, content, offset):
await self.file_transactions.fd_write(self.fd, content, offset)
os.lseek(self.file_oracle_fd, offset, os.SEEK_SET)
os.write(self.file_oracle_fd, content)
@rule(length=size)
async def resize(self, length):
await self.file_transactions.fd_resize(self.fd, length)
os.ftruncate(self.file_oracle_fd, length)
@rule()
async def reopen(self):
await self.file_transactions.fd_close(self.fd)
self.fd = self.local_storage.create_file_descriptor(self.fresh_manifest)
os.close(self.file_oracle_fd)
self.file_oracle_fd = os.open(self.file_oracle_path, os.O_RDWR)
run_state_machine_as_test(FileOperationsStateMachine, settings=hypothesis_settings)
|
bitlogik/guardata
|
tests/client/fs/workspacefs/test_file_transactions.py
|
test_file_transactions.py
|
py
| 10,393 |
python
|
en
|
code
| 9 |
github-code
|
6
|
32371281333
|
import cv2
import matplotlib.pyplot as plt
def plotImg(img):
if len(img.shape) == 2:
plt.imshow(img, cmap='gray')
plt.show()
else:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
t=0
img = cv2.imread('cv.png')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
binary_img = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV, 131, 15)
plotImg(binary_img)
_, _, boxes, _ = cv2.connectedComponentsWithStats(binary_img)
# first box is the background
boxes = boxes[1:]
filtered_boxes = []
for x,y,w,h,pixels in boxes:
if h < 100 and w < 100 and h > 10 and w > 10:
filtered_boxes.append((x,y,w,h))
t=t+1
for x,y,w,h in filtered_boxes:
cv2.rectangle(img, (x,y), (x+w,y+h), (0,0,255),2)
print(t)
plotImg(img)
|
RisinPhoenix12/Computer-Vision
|
dots.py
|
dots.py
|
py
| 818 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25097367954
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 13:24:42 2022
@author: maria
"""
import numpy as np
import pandas as pd
from numpy import zeros, newaxis
import matplotlib.pyplot as plt
import scipy as sp
from scipy.signal import butter,filtfilt,medfilt
import csv
import re
import functions2022_07_15 as fun
import cProfile
animal= 'Hedes'
date= '2022-07-21'
#note: if experiment type not known, put 'suite2p' instead
experiment = '2'
#the file number of the NiDaq file, not always experiment-1 because there might have been an issue with a previous acquisition etc
file_number = '1'
log_number = '1'
plane_number = '1'
#IMPORTANT: SPECIFY THE FRAME RATE
frame_rate = 15
#the total amount of seconds to plot
seconds = 5
#specify the cell for single cell plotting
filePathlog = 'Z://RawData//'+animal+ '//'+date+ '//'+experiment+'//Log'+log_number+'.csv'
#%%
#getting stimulus identity
Log_list = fun.GetStimulusInfo (filePathlog, props = ["Ori", "SFreq", "TFreq", "Contrast"])
cProfile.run('re.compile("foo|bar")')
#%%
#converting the list of dictionaries into an array and adding the time of the stimulus
#worked easiest by using pandas dataframe
log = np.array(pd.DataFrame(Log_list).values).astype(np.float64)
#log[0] is the degrees, log[1] would be spatial freq etc (depending on the order in the log list)
#no of stimuli specifies the total amount of stim shown
# nr_stimuli = aligned.shape[1]
# #log_Ori takes the first column of the log array because that corresponds to the first element in props in the GetStimulusInfo function above
log_Ori = log[:,0].reshape(480,)
#first getting the angles available, usually only 4 when trying other parameters
angles = np.array([0, 90, 180, 270])
#Temp freq
TFreq = np.array([0.5, 1, 2, 4, 8, 16])
SFreq = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32])
#%%
#getting indices for the same type of stim, ie same orientation and same temp frep
#for one angle
reps = 20
all_oneP_TFreq = np.zeros((reps, TFreq.shape[0])).astype(int)
log_TFreq = log[:,2]
#for angle in range(angles.shape[0]):
angle = 0
for freq in range(TFreq.shape[0]): #and j in range(TFreq.shape[0]):
specific_P = np.where((log[:,0] == angles[angle]) & (log_TFreq == TFreq[freq])) [0]
all_oneP_TFreq[:, freq] = specific_P
#all_TFreq =
#%% for all angles
#getting a 3D array with shape(orientation, repeats, TFreq)
reps = 20
all_TFreq = np.zeros((angles.shape[0], reps, TFreq.shape[0])).astype(int)
for angle in range(angles.shape[0]):
for freq in range(TFreq.shape[0]): #and j in range(TFreq.shape[0]):
specific_P = np.where((log[:,0] == angles[angle]) & (log_TFreq == TFreq[freq])) [0]
all_TFreq[angle, :, freq] = specific_P
cProfile.run('re.compile("foo|bar")')
|
mariacozan/Analysis_and_Processing
|
functions/2022-07-19_checking_log_file.py
|
2022-07-19_checking_log_file.py
|
py
| 2,748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27984730572
|
from Circle import Circle
def main():
circle1 = Circle()
print(circle1.radius, " ", format(circle1.get_area(), ".4f"))
circle2 = Circle(5)
print(circle2.radius, " ", format(circle2.get_area(), ".3f"))
circle3 = Circle(25)
print(circle3.radius, " ", format(circle3.get_area(), ".3f"))
circle4 = Circle(100)
print(circle4.radius, " ", format(circle4.get_area(), ".3f"))
main()
|
skyclouds2001/Python-Learning
|
study-7/7-2.py
|
7-2.py
|
py
| 413 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30984637396
|
from datetime import datetime
from constants import ProducerTypes
from events.producers import get_producer
from events.utils import get_routing_key
from models import (
Task,
TaskCost,
)
from popug_schema_registry.models.v1.task_cost_added_event_schema import (
TaskCostAddedEventSchema,
)
def send_taskcost_added_event(taskcost: TaskCost, task: Task) -> None:
producer = get_producer(ProducerTypes.TASKCOSTS_BC)
event = TaskCostAddedEventSchema(
data={
"public_id": taskcost.public_id,
"task_public_id": task.public_id,
"debit_cost": taskcost.debit_cost,
"credit_cost": taskcost.credit_cost,
},
produced_at=datetime.utcnow(),
)
producer.publish_message(
event.json().encode("utf-8"),
get_routing_key(event.title, event.version),
)
|
Drozdetskiy/popug_jira
|
popug_accounting/src/events/taskcost/send_event.py
|
send_event.py
|
py
| 857 |
python
|
en
|
code
| 5 |
github-code
|
6
|
41211788790
|
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import os
import json
from PIL import Image
import requests
from io import BytesIO
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def cosine_similarity(ratings):
    """Return the matrix of pairwise cosine similarities between the rows of `ratings`."""
sim = ratings.dot(ratings.T)
if not isinstance(sim, np.ndarray):
sim = sim.toarray()
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
def main():
y_test = []
x_test = []
FILE_PATH = "/content/gdrive/MyDrive/TER/MoviesDataBase/movie_1202"
IMAGE_BASE_PATH = "https://image.tmdb.org/t/p/w500"
for movie in os.listdir(FILE_PATH):
if movie.split(".")[1] != "json":
continue
movie_id = movie.split('_')[1].split('.')[0]
fr = open(FILE_PATH + "/" + movie)
movie_model = json.load(fr)
fr.close()
if movie_model['poster_path']:
img_path = IMAGE_BASE_PATH + movie_model['poster_path']
html = requests.get(img_path, verify=False)
poster = Image.open(BytesIO(html.content))
poster_img = poster.crop()
if poster:
# img = image.load_img(poster_img, target_size=(224, 224))
img = poster_img.resize((224, 224))
# img.show()
y_test.append(movie_id)
x = image.img_to_array(img)
# print(movie_id)
# print(x[:,:,0])
# print(np.shape(x[:,:,0]))
# exit(0)
if np.shape(x)[2] == 1:
x = np.stack((x[:, :, 0],) * 3, axis=-1)
x = np.expand_dims(x, axis=0)
if len(x_test) > 0:
# print(np.shape(x_test))
# print(np.shape(x))
# exit(0)
x_test = np.concatenate((x_test, x))
else:
x_test = x
x_test = preprocess_input(x_test)
model = ResNet50(weights='imagenet', include_top=False)
features = model.predict(x_test)
# print(np.shape(features))
# print(len(y_test))
features_compress = features.reshape(len(y_test), 7 * 7 * 2048)
# print(np.shape(features_compress))
# sim = cosine_similarity(features_compress)
image_sample = Image.open("/content/gdrive/MyDrive/TER/Test/image2.jpg")
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 2048)
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
# print("sim:", np.shape(sim))
top = np.argsort(-sim[-1, :], axis=0)[1:3]
recommend = [y_test[i] for i in top]
print(recommend)
# print(sim)
if __name__ == "__main__":
main()
|
ming19956/PFE
|
information-retrival-search-engine/informationRetrival/resnet50/resnet50.py
|
resnet50.py
|
py
| 3,180 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30779245065
|
#Ask user for name
name=input("Enter your name: ")
#Ask user for the age
age=input("Enter your age: ")
#Ask city
city= input("Enter the city name you live in: ")
#Ask what they enjoy
like=input("Enter what you like to do:")
#Create output text
sentence="Your name is {} and you are {} years old. you live in {} and you love {}"
output=sentence.format(name,age,city,like)
#Print output
print(output)
|
Sruti-Dey/python_mini_projects
|
02_hello_you.py
|
02_hello_you.py
|
py
| 404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33055278390
|
import psycopg2 as psy
import pandas as pd
# Class for connect and write Postgresql
class PsycopgPostgresWarehouse:
def __init__(self, host, database, user, pw, port):
self.host = host
self.database = database
self.user = user
self.pw = pw
self.port = port
# Connect Database
def connect_database(self) -> str:
try:
conn = psy.connect(
host=self.host,
database=self.database,
user=self.user,
password=self.pw,
port=self.port
)
print("Connect Success")
except:
print("Failed connect")
return conn
# Connect database query data to Dataframe
@staticmethod
def to_dataframe(conn, query) -> pd.DataFrame:
df = pd.read_sql(query, conn)
return df
    # Reference on @staticmethod (article in Thai: a static method belongs to the class
    # and is called on it without creating an instance):
    # https://www.mindphp.com/developer/tips-python/7907-static-method-in-python.html
@staticmethod
def execute_mogrify_upsert(conn, dataframe, column_unique, table):
df_not_unique_key = dataframe.drop(columns=column_unique)
upsert_value = ["EXCLUDED." + s for s in df_not_unique_key.columns]
tpls = [tuple(x) for x in dataframe.to_numpy()]
cols = ','.join(list(dataframe.columns))
cols_2 = ','.join(list(df_not_unique_key.columns))
cols_3 = ','.join(upsert_value)
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
add = add_no_column_to_insert.rstrip(add_no_column_to_insert[-1])
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(
values) + " ON CONFLICT (" + column_unique + ") DO UPDATE SET (" + cols_2 + ")" + " = (" + cols_3 + ")"
# print(sql)
cursor.execute(sql)
print("Data upserted using execute_mogrify() successfully. : " + str(table) + "")
conn.commit()
cursor.close()
@staticmethod
def execute_mogrify_upsert_single_column(conn, dataframe, column_unique, table):
df_not_unique_key = dataframe.drop(columns=column_unique)
upsert_value = ["EXCLUDED." + s for s in df_not_unique_key.columns]
tpls = [tuple(x) for x in dataframe.to_numpy()]
cols = ','.join(list(dataframe.columns))
cols_2 = ','.join(list(df_not_unique_key.columns))
cols_3 = ','.join(upsert_value)
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
add = add_no_column_to_insert.rstrip(add_no_column_to_insert[-1])
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(
values) + " ON CONFLICT (" + column_unique + ") DO UPDATE SET " + cols_2 + "" + " = " + cols_3 + ""
# print(function)
cursor.execute(sql)
print("Data upserted using execute_mogrify() successfully. : " + str(table) + "")
conn.commit()
cursor.close()
@staticmethod
def execute_mogrify_insert(conn, dataframe, table):
# Creating a list of tupples from the dataframe values
tpls = [tuple(x) for x in dataframe.to_numpy()]
# dataframe columns with Comma-separated
cols = ','.join(list(dataframe.columns))
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
add = add_no_column_to_insert.rstrip(add_no_column_to_insert[-1])
# SQL query to execute
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(values)
# print(function)
# try:
cursor.execute(sql)
# print(cursor.execute(function))
conn.commit()
print("Data inserted using execute_mogrify() successfully.")
cursor.close()
# except (Exception, psy.DatabaseError):
# print(psy.DatabaseError)
# print("Error")
# cursor.close()
# pass
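# --- Illustrative usage sketch (not part of the original module) ---
# Shows one way the warehouse class above might be driven end to end: connect,
# upsert a small DataFrame keyed on a unique "id" column, then read the rows back.
# The host, credentials and table name are placeholders, and the target table is
# assumed to exist with a UNIQUE/PRIMARY KEY constraint on "id"
# (e.g. CREATE TABLE demo_items (id text PRIMARY KEY, name text);).
if __name__ == "__main__":
    wh = PsycopgPostgresWarehouse("localhost", "warehouse", "postgres", "secret", 5432)
    conn = wh.connect_database()
    demo = pd.DataFrame({"id": ["1", "2"], "name": ["alpha", "beta"]})
    PsycopgPostgresWarehouse.execute_mogrify_upsert_single_column(conn, demo, "id", "demo_items")
    print(PsycopgPostgresWarehouse.to_dataframe(conn, "SELECT * FROM demo_items;"))
    conn.close()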
|
Tana8M/data-engineer-assignment
|
pipeline/function/postgresql_function/psycopg2_postgresql.py
|
psycopg2_postgresql.py
|
py
| 4,631 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34111450286
|
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
''' from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
] '''
from django.conf.urls import url,include
from django.contrib import admin
from . import view,mysql
from ctrl import views
from controller import index
from controller import gui
# included (nested) routes
other=[
url(r'^info/',view.info),
url(r'^psd/',view.psd),
]
urlpatterns = [
url(r'^$', view.hello),
url(r'^hi',view.hi),
url(r'^int/',view.myint),
url(r'^json/',view.json),
url(r'^res/',view.res),
url(r'^admin/', admin.site.urls),
url(r'^insert/',mysql.insert),
url(r'^find/',mysql.find),
url(r'^update/',mysql.update),
url(r'^del/',mysql.delete),
url(r'^user/',include(other)),
url(r'^cc/',views.cc),
url(r'^index/',index.index),
url(r'^gui/',gui.index),
    url(r'^controller/',include('controller.urls')), # use a sub-router (included URLconf)
]
|
githubrghd/mydemo
|
python-demo/helloworld/helloworld/urls.py
|
urls.py
|
py
| 1,551 |
python
|
en
|
code
| null |
github-code
|
6
|
74672197946
|
#we use a flag variable as a single signal to the program when several different conditions may end a while loop
#we set the flag to either True or False
active = True
prompt = "Tell Me Your Name and I Will Give It A Godly Title "
message = ""
while active:
message = input(prompt)
if message == "quit":
active = False
else:
message += " The God"
print(message)
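# --- Illustrative extension (not part of the original lesson) ---
# The flag pays off once several different conditions can end the loop: here both
# typing "quit" and handing out three titles simply flip the same flag, so the
# while test stays one readable check. The names below are made up for the demo.
titles_given = 0
active = True
while active:
    message = input(prompt)
    if message == "quit":
        active = False
    else:
        titles_given += 1
        print(message + " The God")
        if titles_given >= 3:
            active = False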
|
ncortezi/python_education_2023
|
chapter_7/intro_flags.py
|
intro_flags.py
|
py
| 374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10241474430
|
import scipy.stats  # needed for scipy.stats.t.ppf below
import copy
import numpy as np
def gesd(x, **kwargs):
    """Generalized ESD test for outliers; kwargs: alpha (default 0.05), n_out
    (max outliers to test, default 10% of the data), outlier_side (-1 low, 0 both,
    1 high). Returns (outlier mask, cleaned data)."""
x_ = np.array(x)
alpha = 0.05 if 'alpha' not in kwargs else kwargs['alpha']
n_out = int(np.ceil(len(x_) * 0.1)) if 'n_out' not in kwargs else kwargs['n_out']
outlier_side = 0 if 'outlier_side' not in kwargs else kwargs['outlier_side']
alpha_ = alpha / 2 if outlier_side == 0 else alpha
n = len(x_)
    temp = x_.copy()  # work on a copy so the NaN markers below do not leak into x_
R = np.zeros([n_out])
rm_idx = copy.copy(R).astype(int)
lam = copy.copy(R)
for j in range(n_out):
if outlier_side == -1:
sample = np.nanmin(temp)
rm_idx[j] = list(temp).index(sample)
R[j] = (np.nanmean(temp) - sample)
elif outlier_side == 0:
R[j] = np.nanmax(abs(temp - np.nanmean(temp)))
rm_idx[j] = np.argmax(abs(temp - np.nanmean(temp)))
else:
sample = np.nanmax(temp)
rm_idx[j] = list(temp).index(sample)
R[j] = (sample - np.nanmean(temp))
R[j] /= np.nanstd(temp)
temp[rm_idx[j]] = float('nan')
        # Critical value for the (j+1)-th test; j is 0-based here while the usual
        # GESD formulas use a 1-based index i = j + 1.
        p = 1 - alpha_ / (n - j)
        t = scipy.stats.t.ppf(p, n - j - 2)
        lam[j] = ((n - j - 1) * t) / (np.sqrt((n - j - 2 + t ** 2) * (n - j)))
    # The number of outliers is the largest i (1-based) for which R_i > lambda_i.
    idx = np.zeros(n, dtype=bool)
    significant = list(R > lam)
    if True in significant:
        n_outliers = n_out - significant[::-1].index(True)
        idx[rm_idx[:n_outliers]] = True
    x2 = x_[~idx]
return idx, x2
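# --- Example usage (illustrative, not part of the original module) ---
# Runs the generalized ESD test above on a synthetic sample with two planted
# outliers. The keyword names (alpha, n_out, outlier_side) are the ones gesd()
# accepts; outlier_side=0 flags unusually low and high values alike.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sample = rng.normal(0.0, 1.0, 60)
    sample[5] = 9.0    # planted high outlier
    sample[23] = -8.0  # planted low outlier
    outlier_mask, cleaned = gesd(sample, alpha=0.05, n_out=6, outlier_side=0)
    print("flagged indices:", np.where(outlier_mask)[0])
    print("points remaining:", cleaned.size)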
|
WHThhhh/Seeg_prepro
|
GESD_wht.py
|
GESD_wht.py
|
py
| 1,421 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23007665672
|
#!/usr/bin/env python
#
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Top levels scripts to extract castro data from an all-sky analysis
"""
import os
import argparse
import numpy as np
import yaml
from astropy import table
from fermipy import fits_utils
from dmpipe import dmp_roi
from dmpipe.dm_target import DMTargetFactory
def read_targets(filepath):
""" Read a set of targets from a fits file """
return read_targets_from_fits(filepath)
def read_targets_from_fits(fitsfile):
""" Read a set of targets from a fits file """
tab = table.Table.read(fitsfile)
mask = np.zeros(len(tab), bool)
key_col = tab['key']
for i in range(len(tab)):
mask[i] = key_col[i].find('point') != -1
tab_mask = tab[mask]
coords = np.ndarray((len(tab_mask), 2))
coords[:, 0] = tab_mask['glon']
coords[:, 1] = tab_mask['glat']
out_dict = {'targets': tab_mask['target'],
'coordinates': coords}
return out_dict
def read_targets_from_yaml(yamlfile):
""" Read a set of targets from a yaml file """
    din = yaml.safe_load(yamlfile)  # safe_load avoids the unsafe default Loader
coords = np.ndarray((len(din), 2))
for i, (key, val) in enumerate(din.items()):
coords[i, 0] = val['l']
coords[i, 1] = val['b']
out_dict = {'targets': list(din.keys()),
'coordinates': coords}
return out_dict
def add_columns(out_table, in_table, col_names):
""" Add columnes to a table """
for col in col_names:
out_table.add_column(in_table[col])
def main():
""" Hook for command line access """
    # Argument definition
usage = "usage: %(prog)s [input]"
description = "Collect all the new source"
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('--input', '-i', default='roi_set.yaml', help='ROI set definition file.')
parser.add_argument('--output', '-o', type=argparse.FileType('w'), help='Output file.')
parser.add_argument('--clobber', action='store_true', help='Overwrite output file.')
parser.add_argument('--targets', '-t', type=str, help='Target file.')
parser.add_argument('--filestr', '-f', default="tscube.fits",
help="Name of file within each ROI sub-directory")
# Argument parsing
args = parser.parse_args()
# Read the target file
targ_type = os.path.splitext(args.targets)[1]
print(targ_type)
if targ_type in ['.fit', '.fits']:
targets = DMTargetFactory.read_table(args.targets)
roster = None
else:
targets, roster = DMTargetFactory.make_table([args.targets])
# Get the sky_crds
sky_crds = DMTargetFactory.get_sky_crds(targets)
# Make the roi_set object
roi_set, basedir = dmp_roi.DMROISet.create_from_yaml(args.input)
# extract the data
out_table = roi_set.extract_table_data(sky_crds, args.filestr,
basedir=basedir, tables=["SCANDATA", "FITDATA"])
# add_names_column(out_table,targets['name'])
col_names = ['name', 'ra', 'dec', 'distance', 'proftype', 'glat', 'glon', 'j_integ', 'd_integ']
add_columns(out_table, targets, col_names)
ebounds_table = roi_set.extract_single_table(args.filestr, basedir=basedir, table="EBOUNDS")
# Write the output
fits_utils.write_tables_to_fits(args.output, [out_table, ebounds_table],
clobber=args.clobber, namelist=["SCANDATA", "EBOUNDS"])
if __name__ == '__main__':
main()
|
fermiPy/dmpipe
|
dmpipe/scripts/extract_castro_data.py
|
extract_castro_data.py
|
py
| 3,510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18932829460
|
# === Task 5 ===
# Write a program that uses a function to find in which week of the year Janko spent the most on sweets. The program generates Janko's expenses as decimal numbers in the range (0€-2.55€) with a precision of two decimal places.
from random import randint
def najvyssie(vydavky):
i_najvisieho = 0
for i in range(len(vydavky)):
if vydavky[i] > vydavky[i_najvisieho]:
i_najvisieho = i
    return i_najvisieho + 1 # weeks are numbered from 1, not from 0!
priklad_v = [randint(0, 255)/100 for i in range(4*12)] # 4 weeks for each month
priklad_naj_tyzden= najvyssie(priklad_v)
print(f"vyd = {priklad_v}")
print(f"najvyssi vydavok je v {priklad_naj_tyzden}. tyzdni, teda {priklad_v[priklad_naj_tyzden - 1]}")
|
Plasmoxy/MaturitaInformatika2019
|
ulohyPL/u05.py
|
u05.py
|
py
| 775 |
python
|
sk
|
code
| 2 |
github-code
|
6
|
18464161519
|
# Libraries
import pandas as pd
import re, sqlite3
# Reading the data
data = pd.read_csv('Digimon_cards.csv')
# Connecting with the database
con = sqlite3.connect('Digimon_Cards.sqlite')
cur = con.cursor()
# Inserting the data
## Card type's table
insert_card_type = 'INSERT INTO Card_types(name) VALUES'
insert_card_type += '('
for card_type in data['Card_type'].unique():
insert_card_type += '"'+str(card_type) + '"'+'),('
insert_card_type = insert_card_type[:-2] +';'
cur.execute(insert_card_type)
con.commit()
## Color's table
insert_color = 'INSERT INTO Colors(name) VALUES'
insert_color += '('
for color in data['Color'].unique():
insert_color += '"'+str(color) + '"'+'),('
insert_color = insert_color[:-2] +';'
cur.execute(insert_color)
con.commit()
## Form's table
insert_form = 'INSERT INTO Forms(name) VALUES'
insert_form += '('
for form in data['Form'].unique():
insert_form += '"'+str(form) + '"'+'),('
insert_form = insert_form[:-2] +';'
insert_form = re.sub('\("-"\),', '', insert_form)
cur.execute(insert_form)
con.commit()
## Attribute's table
insert_attribute = 'INSERT INTO Attributes(name) VALUES'
insert_attribute += '('
for attribute in data['Attribute'].unique():
insert_attribute += '"'+str(attribute) + '"'+'),('
insert_attribute = insert_attribute[:-2] +';'
insert_attribute = re.sub('\("-"\),', '', insert_attribute)
cur.execute(insert_attribute)
con.commit()
## Digimon type's table
insert_digimon_type = 'INSERT INTO Digimon_types(name) VALUES'
insert_digimon_type += '('
for digimon_type in data['Digimon_type'].unique():
insert_digimon_type += '"'+str(digimon_type) + '"'+'),('
insert_digimon_type = insert_digimon_type[:-2] +';'
cur.execute(insert_digimon_type)
con.commit()
## Deck type's table
insert_deck_type = 'INSERT INTO Deck_types(name) VALUES'
insert_deck_type += '('
for deck_type in data['Deck_type'].unique():
insert_deck_type += '"'+str(deck_type) + '"'+'),('
insert_deck_type = insert_deck_type[:-2] +';'
cur.execute(insert_deck_type)
con.commit()
## Effect's table
### Combining all the effects features into one dataframe
Effects = list(data['Effect'])
Effects.extend(data['Digivolve_effect'])
Effects.extend(data['Security_effect'])
Effects = pd.DataFrame(Effects)[0].unique()
### Inserting the values
insert_effect = "INSERT INTO Effects(name) VALUES"
insert_effect = insert_effect + '('
for effect in Effects:
insert_effect = insert_effect + '"' + str(effect) + '"' + '),('
insert_effect = insert_effect[:-2]+ ';'
cur.execute(insert_effect)
con.commit()
## Digimon's data
colum_names = ['Card_type', 'Color', 'Form', 'Attribute','Digimon_type',
'Effect', 'Digivolve_effect', 'Security_effect', 'Deck_type']
insert_digimon = '''INSERT INTO Digimons(code, name, level, card_type_id, color_id, form_id, attribute_id,
digimon_type_id, DP, Play_cost, Digivolve_cost_1,
Digivolve_level_1, Digivolve_cost_2, Digivolve_level_2,
effect_id, digivolve_effect_id, security_effect_id, deck_type_id, 'Deck_name','Image_link') VALUES'''
for row in range(0,len(data)):
i = 0
insert_digimon += '('
for feature in data.iloc[row]:
title = data.columns.values[i]
feature = feature
if title in ['Effect', 'Digivolve_effect', 'Security_effect']:
title = 'Effect'
if title in colum_names:
select_query = 'SELECT id FROM '
select_query += str(title) + 's' + ' WHERE name = ? ;'
cur.execute(select_query, (feature, ))
try:
feature_id = cur.fetchone()[0]
insert_digimon += '"'+str(feature_id) + '"'+','
except:
insert_digimon += 'NULL,'
elif pd.isna(feature):
insert_digimon += 'NULL,'
else:
insert_digimon += '"' + str(feature) + '"'+','
i = i+1
insert_digimon = insert_digimon[:-1] +'),'
insert_digimon = insert_digimon[:-2] +');'
cur.execute(insert_digimon)
con.commit()
# Disconnecting from the database
con.close()
|
davidr9708/Digimon_Card_Game
|
Code/3_Data_insertion.py
|
3_Data_insertion.py
|
py
| 4,306 |
python
|
en
|
code
| 9 |
github-code
|
6
|
11081518318
|
# 'local file' or 'database'
data_source = 'local file'
# if data is loaded from local file, provide file name:
file_name = 'city_rides_2020.pkl'
# columns that the data is grouped by. Should be compatible with file name
# if data is loaded from file.
groupby_cols = ['from_city', 'dep_week']
# The color column is scaled to match the colormap.
# Ideally the upper limit would equal the number of colors in the colormap that the
# plotting code generates, but the distribution of pax per cap is so skewed that we
# map everything at or above cmap_limit (100 out of the 0-300 scale, i.e. the top
# two thirds of the range) to the color reserved for the highest values.
scale_limits = (0, 300)
cmap_limit = 100
radius_scale = 2000
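# --- Hypothetical sketch (an assumption, not part of the original project) ---
# One way the plotting code might apply the settings above: clip the colour column
# to scale_limits, let every scaled value at or above cmap_limit share the top
# colour, and turn raw values into marker radii with radius_scale. The sample
# numbers and the combination shown here are illustrative only.
if __name__ == "__main__":
    import numpy as np
    rides_per_capita = np.array([5.0, 80.0, 150.0, 400.0])  # made-up colour column
    scaled = np.clip(rides_per_capita, *scale_limits)        # kept within 0..300
    colour_index = np.minimum(scaled, cmap_limit)            # >= 100 shares the last colour
    radii = rides_per_capita * radius_scale
    print(scaled, colour_index, radii)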
|
relaxingdave/network_visualization
|
config.py
|
config.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35424778344
|
from art import higher_lower,vs
from game_data import data
import random
import os
#Display art
score = 0
game_continue = True
account_b = random.choice(data)
def format_data(account):
"""Takes the account data and return the printable format"""
account_name = account["name"]
account_desc = account["description"]
account_country = account["country"]
return f"{account_name}, a {account_desc}, from {account_country}"
def check_answer(guess, a_follower,b_follower):
""" Take the user guess and follower count an dreturn if they got it right."""
if a_follower > b_follower:
return guess == "a"
else:
return guess == "b"
print(higher_lower)
#make the game repeatable
while game_continue:
#Generate random data
#Making the accounts at position B become the next positon A
account_a = account_b
account_b = random.choice(data)
while account_a == account_b:
account_b = random.choice(data)
#Format the account data into printable format
print(f"Comapre A : {format_data(account_a)}")
print(vs)
print(f"Against B : {format_data(account_b)}")
#ask user guess
guess = input("Who has more followers? Type 'A' or 'B' : ").lower()
#Check if user is correct.
    ## Get follower count of each account.
a_follower_account = account_a["follower_count"]
b_follower_account = account_b["follower_count"]
is_correct = check_answer(guess, a_follower_account, b_follower_account)
    #Give user feedback on their guess.
#score keeping
if is_correct:
score += 1
print(f"You are right! Current score : {score}")
else:
game_continue = False
print(f"Sorry, that's wrong! Current score : {score}")
    #clear the screen between rounds
    if game_continue:
        os.system("cls" if os.name == "nt" else "clear")
|
pav537/Python
|
Higher_Lower Game.py
|
Higher_Lower Game.py
|
py
| 1,900 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18677295647
|
#!/usr/bin/env python
"""
Unit tests for module `msg`.
"""
from msg import Msg
if __name__ == '__main__':
# for testing only
#import msg
m = Msg()
m.enable_color(True) # only required to force coloured output to file
m.prefix_set('myprog') # make a prefix for following msg's
m.info('Hello World (to stdout)')
m.msg('This is m.msg(), with a prefix "myprog" (to stdout)')
m.info('This is m.info() (to stdout)')
m.warn('This is m.warn() (to stderr)')
m.error("This is m.error() (to stderr)")
m.line()
m.warn('This is a multi-line m.warn() message.',
'This is the next line.', '(to stderr)')
m.prefix_add('color') # set a double level prefix
m.info(f"Double prefix '{m.prefixes}', from m.info() (to stdout)")
m.msg(f"m.msg() with prefix '{m.prefixes}'")
m.line()
m.msg('Make new default colours for m.info() and m.warn()')
m.set_colors(info_fore='LIGHTBLUE_EX', info_style='BRIGHT',
warn_fore='RED', warn_back='WHITE', warn_style='BRIGHT')
m.info('This is m.info()', 'with new default colours.')
m.warn('This is m.warn()', 'with new default colours.')
m.prefix_set('')
m.line()
m.msg('', 'Now back to m.msg() without prefixes.', '')
m.prefix_set('colors')
m.msg('Display all available foreground colours for m.info() with black background colour.')
all_colors = [
'BLACK','RED','GREEN','YELLOW','BLUE','MAGENTA','CYAN','WHITE',
'LIGHTBLACK_EX', 'LIGHTRED_EX','LIGHTGREEN_EX','LIGHTYELLOW_EX',
'LIGHTBLUE_EX', 'LIGHTMAGENTA_EX','LIGHTCYAN_EX','LIGHTWHITE_EX'
]
all_styles = [ 'NORMAL', 'BRIGHT', 'DIM' ]
for color in all_colors:
if color == 'BLACK': continue # don't need black on black
m.prefix_add(color)
m.set_colors(info_fore=color, info_back='black')
m.line(52)
for style in all_styles:
m.prefix_add(style)
m.set_colors(info_style=style)
m.info('Hello World.')
m.prefix_pop()
m.prefix_pop()
m.line()
m = Msg(prefixes=['textwrap'], use_color=True, columns=60, prefix_separator='> ')
m.prefix_add(f'default {m.columns} cols')
m.warn('This is a very, very, very long line that needs to be wrapped. Or else it will look like crap.', '')
m.prefix_pop()
m.prefix_add('disabled')
m.enable_textwrap(False)
m.line()
m.warn("This is a very, very, very long line that doesn't want to be wrapped. Even if it will look like crap.")
m.prefix_pop()
|
Open-Technology-Foundation/msg
|
unittests/msg-test.py
|
msg-test.py
|
py
| 2,402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39940042757
|
import numpy as np
import matplotlib.pyplot as plt
import os.path
import Style
import sys
zsims = ['3.61', '4.038','5.017']
simnom = ['SAGE']
cm = plt.get_cmap('tab10') # Colour map to draw colours from
path2sim = 'C:/Users/Olivia/TFG-TUT/'
for iiz, zsim in enumerate(zsims):
for sim in simnom:
ffav = path2sim + 'Datos_simulaciones/Medias_' + zsim + '.csv'
ffcorte = path2sim + 'Datos_simulaciones/Mass_SFR_corte_z_' + zsim + '.csv'
ff10 = path2sim + 'Datos_simulaciones/Mass_SFR_10_z_' + zsim + '.csv'
if not os.path.isfile(ffav):
continue
if not os.path.isfile(ffcorte):
continue
if not os.path.isfile(ff10):
continue
        '''Define colors'''
cols = []
col = cm(1. * iiz / len(zsims))
cols.append(col)
#print(cols,col)
        '''MEANS'''
ghist = np.loadtxt(ffav, skiprows=1, usecols=(0), unpack=True, delimiter=',')
avSFR = np.loadtxt(ffav, skiprows=1, usecols=(1), unpack=True, delimiter=',') #Msun h^-1 yr^-1
ErrorMass = np.loadtxt(ffav, skiprows=1, usecols=(2), unpack=True, delimiter=',')
        '''CUT'''
MassCorte = np.loadtxt(ffcorte, skiprows=1, usecols=(1), unpack=True, delimiter=',')
StarFRCorte = np.loadtxt(ffcorte, skiprows=1, usecols=(2), unpack=True, delimiter=',')
SFRCorte = StarFRCorte - 9 #Msun h^-1 yr^-1
        '''10 PERCENT'''
Mass10 = np.loadtxt(ff10, skiprows=1, usecols=(1), unpack=True, delimiter=',')
StarFR10 = np.loadtxt(ff10, skiprows=1, usecols=(2), unpack=True, delimiter=',')
SFR10 = StarFR10 - 9 # Msun h^-1 yr^-1
indav = np.where(avSFR>0)
#indcorte = np.where(StarFR>0)
plt.style.use(Style.style1)
plt.plot(Mass10, SFR10, marker='.', color = 'steelblue', linewidth=0, label='10$\%$ SAGE z = ' + zsims[iiz] + '')
plt.plot(MassCorte, SFRCorte, marker='*', color = 'r', linewidth=0, label='corte SAGE z = ' + zsims[iiz] + '')
plt.plot(ghist[indav], avSFR[indav], marker='^', linewidth=0, color='k', label='SAGE z = ' + zsims[iiz] + '')
plt.errorbar(ghist[indav], avSFR[indav], yerr=ErrorMass[indav], xerr=None, fmt='.k')
plt.ylabel('log$_{10} \;$ (SFR $[M_{\odot} \; h^{-1}\; yr^{-1}$])')
plt.xlabel('log$_{10} \;$(M$ \; [M_{\odot} \; h^{-1} $])')
        #plt.title('Mean of the SAGE SFR versus galaxy mass bins')
#plt.xlim(8.4, 11.6)
plt.ylim(-2.5,3)
plotnom = path2sim + 'Figuras/Definitivas/Medias_corte_10_z_' + zsims[iiz] + '.png'
plt.legend()
plt.savefig(plotnom)
plt.show()
|
Ovive57/TFG-TUT
|
Dibujo_Medias_corte_10.py
|
Dibujo_Medias_corte_10.py
|
py
| 2,682 |
python
|
en
|
code
| 1 |
github-code
|
6
|
69877253629
|
import torch
from torch import nn
import yaml
import cv2
import numpy as np
from vidgear.gears import CamGear
from matplotlib import pyplot as plt
from IPython.display import Image, clear_output
import argparse
import os
import datetime
import sys
from PIL import ImageFont, ImageDraw, Image
import time
from pathlib import Path
from utils.plots import *
from utils.torch_utils import *
from utils.general import *
from utils.datasets import letterbox
import gdown
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort
import pymysql
# Path setup
path = "C:/Users/YongJun/Desktop/YOLO/1228_TNS/images"
model = torch.load('C:/Users/YongJun/Desktop/YOLO/YOLOv5s_1229.pt')
image_paths = sorted([os.path.join(path, f) for f in os.listdir(path) if f.endswith(".jpg") or f.endswith(".png")])
label_paths = sorted([os.path.join(path, f) for f in os.listdir(path) if f.endswith(".txt")])
# Database connection
conn = pymysql.connect(
host='localhost',
user='root',
password='013579',
db='tns_db',
charset='utf8'
)
# Data for DB storage - record the start time
start_time = time.time()
# Labelled classes
class_dict = {
"OK": 0,
"NG_Blur": 1,
"NG_Scratch": 2,
}
ok_idx = class_dict['OK']
ng_blur_idx = class_dict['NG_Blur']
ng_scratch_idx = class_dict['NG_Scratch']
labels = []
# Webcam setup
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(1)
# Detection counts for webcam 1
ok_count1 = 0
ng_blur_count1 = 0
ng_scratch_count1 = 0
# Detection counts for webcam 2
ok_count2 = 0
ng_blur_count2 = 0
ng_scratch_count2 = 0
# Final combined counts
ok_count = 0
ng_count = 0
# Added part: per-camera detection flags
ok_detected1 = False
ok_detected2 = False
ng_detected1 = False
ng_detected2 = False
# Run YOLO
while True:
    # Record the current time
    current_time = time.time()
    # Compute the time difference from the previous frame
    elapsed_time = current_time - start_time
    # Update the previous frame time
    start_time = current_time
ret1, frame1 = cap1.read()
ret2, frame2 = cap2.read()
labels1 = []
labels2 = []
results1 = model(frame1)
results2 = model(frame2)
detections1 = results1.xyxy[0]
detections2 = results2.xyxy[0]
    # Loop over each detection and check whether it crossed the line
    for detection in detections1:
        # Compute the centre coordinates of the object
        center_x = (detection[0] + detection[2]) / 2
        center_y = (detection[1] + detection[3]) / 2
        # Check whether the object crossed the line - line band (317 ~ 323)
if center_x > 317 and center_x < 323:
label = detection[5]
labels1.append(label)
if label == ok_idx:
ok_count1 += 1
elif label == ng_blur_idx:
ng_blur_count1 += 1
elif label == ng_scratch_idx:
ng_scratch_count1 += 1
    # Loop over each detection and check whether it crossed the line
    for detection in detections2:
        # Compute the centre coordinates of the object
        center_x = (detection[0] + detection[2]) / 2
        center_y = (detection[1] + detection[3]) / 2
        # Check whether the object crossed the line - line band (317 ~ 323)
if center_x > 317 and center_x < 323:
label = detection[5]
labels2.append(label)
if label == ok_idx:
ok_count2 += 1
elif label == ng_blur_idx:
ng_blur_count2 += 1
elif label == ng_scratch_idx:
ng_scratch_count2 += 1
    # Added part: combine the per-camera results
if ok_idx in labels1 and ok_idx in labels2:
ok_detected1 = True
ok_detected2 = True
if ok_detected1 and ok_detected2:
ok_count += 1
ok_detected1 = False
ok_detected2 = False
if ng_blur_idx in labels1 or ng_scratch_idx in labels1:
ng_detected1 = True
if ng_blur_idx in labels2 or ng_scratch_idx in labels2:
ng_detected2 = True
if ng_detected1 or ng_detected2:
ng_count += 1
ng_detected1 = False
ng_detected2 = False
    # DB integration
cursor = conn.cursor()
count = 0
for detection in detections1:
count += 1
name = f"name{count}"
        # Store into the MariaDB table via an SQL statement - add product, count, status, confidence, etc.
        # (INSERT INTO tns (table name) id (table column ... ) VALUES (%s , %s , ...), (python_msg, python_msg))
        cursor.execute("INSERT INTO tns (id) VALUES (%s)",
                       (ok_count1,))
conn.commit()
    # Overlay status text on the video frames with cv2
cv2.putText(frame1, f'OK: {ok_count}',
(30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame1, f'NG_Blur: {ng_blur_count1}',
                (30, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame1, f'NG_Scratch: {ng_scratch_count1}',
                (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'OK: {ok_count2}',
(30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'NG_Blur: {ng_blur_count2}',
(30, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'NG_Scratch: {ng_scratch_count2}',
(30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1,(0, 0, 255), 2)
cv2.line(frame1, (320, 0), (320, 640), (255, 0, 0), 2)
cv2.line(frame2, (320, 0), (320, 640), (255, 0, 0), 2)
cv2.imshow('TNS_CAP1', np.squeeze(results1.render()))
cv2.imshow('TNS_CAP2', np.squeeze(results2.render()))
    # Quit when the 'q' key is pressed
if cv2.waitKey(1) == ord("q"):
break
# Release resources on exit
cap1.release()
cap2.release()
cv2.destroyAllWindows()
|
yeonsoo98/yolov5_object_count
|
detect.py
|
detect.py
|
py
| 5,852 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
21640069600
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from .models import VideoFile
class VideoFileForm(forms.ModelForm):
"""Form for user file uploading."""
def clean(self):
cleaned_data = super().clean()
original_file = cleaned_data.get('original_file')
url = cleaned_data.get('url')
if original_file and url:
raise ValidationError(
_('Only one field must be filled.')
)
elif not original_file and not url:
raise ValidationError(
_('Please enter data in one of these fields.')
)
class Meta:
model = VideoFile
fields = (
'original_file',
'url',
)
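
# Hedged usage sketch (added annotation, not part of the original module): the
# clean() override above enforces an exclusive-or between 'original_file' and
# 'url', which is easiest to see with plain form data (values are placeholders):
#
#     form = VideoFileForm(data={"url": "https://example.org/video.mp4"})
#     form.is_valid()   # True - exactly one of the two fields is filled
#
#     form = VideoFileForm(data={})
#     form.is_valid()   # False - neither field is filled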
|
sergeybe/video-archive
|
src/video/forms.py
|
forms.py
|
py
| 813 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15309466710
|
import asyncio
from misc import dp,bot
from .sqlit import reg_user
from aiogram import types
channel1 = -1001804437355
content_id = -1001165606914
print(1)
markup = types.InlineKeyboardMarkup()
bat_a = types.InlineKeyboardButton(text='Access to group 🔑', url = 'https://t.me/share/url?url=https%3A%2F%2Ft.me%2F%2BH4vDT3QPa381ODUy')
markup.add(bat_a)
async def posting():
while True:
q = await bot.copy_message(chat_id=channel1,from_chat_id=content_id,message_id=10,reply_markup=markup)
await asyncio.sleep(45)
await bot.delete_message(chat_id=channel1,message_id=q.message_id)
@dp.chat_join_request_handler()
async def join(update: types.ChatJoinRequest):
reg_user(update.from_user.id,1)
await bot.copy_message(chat_id=update.from_user.id, from_chat_id=content_id, message_id=16, reply_markup=markup)
try:
await update.approve()
except:
pass
@dp.message_handler(commands=['start'])
async def start_bot(message: types.Message):
reg_user(message.chat.id,ref=1)
print(2)
await bot.copy_message(chat_id=message.chat.id, from_chat_id=content_id, message_id=16, reply_markup=markup)
|
pytera895143242/spec2rep
|
handlers/commands_start.py
|
commands_start.py
|
py
| 1,164 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13111704429
|
def update_single(conn, cursor, table, column, file_number, var):
# update a single column in a sql db. Key is file_number.
sql_update = "UPDATE " + table + " SET " + column + "= ? WHERE File_number = '" + file_number + "'"
cursor.execute(sql_update, [var])
conn.commit()
def insert(conn, cursor, table, columns, data):
# insert data in multiple cols in a sql db. adds a new row
col_number = len(data)
place_holder = ["?"] * col_number
place_str = ",".join(place_holder)
sql_insert = "INSERT INTO " + table + "(" + columns + ") VALUES (" + place_str + ")"
cursor.execute(sql_insert, data)
conn.commit()
def update_multiple(conn, cursor, table, columns, file_number, data):
# update multiple columns in a sql db. Key is file_number.
col_number = len(data)
for index in range(0, col_number):
sql_update = "UPDATE " + table + " SET " + columns[index] + "= ? WHERE File_number = '" + file_number + "'"
var = data[index]
cursor.execute(sql_update, [var])
conn.commit()
def add_columns(cursor, table, columns):
col_number = len(columns)
for index in range(0, col_number):
sql_add = "ALTER TABLE " + table + " ADD " + columns[index]
cursor.execute(sql_add)
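
# Hedged usage sketch (not part of the original module): the helpers above only
# need a DB-API connection/cursor pair that uses '?' placeholders, so an
# in-memory sqlite3 database is enough to exercise them. The table and column
# names below are made up for illustration.
if __name__ == "__main__":
    import sqlite3
    conn = sqlite3.connect(":memory:")
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE records (File_number TEXT, Age INTEGER, City TEXT)")
    insert(conn, cursor, "records", "File_number, Age, City", ["F001", 54, "Pune"])
    update_single(conn, cursor, "records", "Age", "F001", 55)
    update_multiple(conn, cursor, "records", ["Age", "City"], "F001", [56, "Mumbai"])
    print(cursor.execute("SELECT * FROM records").fetchall())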
|
dakelkar/Create_BreastCancerDB
|
new_version/add_update_sql.py
|
add_update_sql.py
|
py
| 1,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30272112886
|
from .views import *
from django.urls import path
urlpatterns = [
path('', home, name='home'),
path('login/', login_user, name='login'),
path('contact/', contact, name='contact'),
path('api/<str:userid>/', api, name='api'),
path('logout/', logout_user, name='logout'),
path('register/', register, name='register'),
path('server-maintenance/', freeze, name='freeze'),
path('exam-status/<str:user>/', exam_end, name='exam_end'),
path('exam-credential/', exam_authentication, name='exam_auth'),
path('exam-credential/auth-user/exam/<str:userid>/', exam, name='exam'),
path('activate/<uidb64>/<token>/<details>/', user_verification, name='activate')
]
|
supratim531/hetc-web
|
scholarship/urls.py
|
urls.py
|
py
| 693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29575131193
|
import json
from argo_ams_library import AmsException, AmsMessage, ArgoMessagingService
class PullPublish:
def __init__(self, config):
self.pull_sub = config["pull_sub"]
self.pub_topic = config["pub_topic"]
self.pull_topic = config["pull_topic"]
self.ams = ArgoMessagingService(endpoint=config["host"], token=config["token"], project=config["project"])
def pull(self, nummsgs):
messages = []
try:
if not self.ams.has_sub(self.pull_sub):
self.ams.create_sub(self.pull_sub, self.pull_topic)
except AmsException as e:
print(e)
raise SystemExit(1)
# try to pull number of messages from subscription. method will
# return (ackIds, AmsMessage) tuples from which ackIds and messages
# payload will be extracted.
ackids = list()
for id, msg in self.ams.pull_sub(self.pull_sub, nummsgs):
data = msg.get_data()
# msgid = msg.get_msgid()
# attr = msg.get_attr()
messages.append(json.loads(data.decode("utf-8")))
# print('msgid={0}, data={1}, attr={2}'.format(msgid, data, attr))
ackids.append(id)
return messages, ackids
def ack(self, ackids):
# pass list of extracted ackIds to AMS Service so that
# it can move the offset for the next subscription pull
# (basically acknowledging pulled messages)
if ackids:
self.ams.ack_sub(self.pull_sub, ackids)
def publish(self, messages):
# messages = [{data:[{id:1},{state:'deployed'}],attributes=''}]
try:
if not self.ams.has_topic(self.pub_topic):
self.ams.create_topic(self.pub_topic)
except AmsException as e:
print(e)
raise SystemExit(1)
# publish one message to given topic. message is constructed with
# help of AmsMessage which accepts data and attributes keys.
# data is Base64 encoded, attributes is dictionary of arbitrary
# key/value pairs
msg = AmsMessage()
msglist = []
for message in messages:
msglist.append(msg(data=json.dumps(message["data"]), attributes={}))
try:
ret = self.ams.publish(self.pub_topic, msglist)
print(ret)
except AmsException as e:
print(e)
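
# Hedged usage sketch (not part of the original class): one possible end-to-end
# driver. The host/token/project/topic values are placeholders, not real
# credentials, and the message shape matches what publish() expects.
if __name__ == "__main__":
    config = {
        "host": "msg.example.org",
        "token": "REPLACE_ME",
        "project": "demo-project",
        "pull_topic": "registry-in",
        "pull_sub": "registry-in-sub",
        "pub_topic": "registry-out",
    }
    client = PullPublish(config)
    messages, ackids = client.pull(5)                 # fetch up to 5 messages
    client.publish([{"data": m} for m in messages])   # republish their payloads
    client.ack(ackids)                                # acknowledge only after publishing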
|
rciam/rciam-federation-registry-agent
|
ServiceRegistryAms/PullPublish.py
|
PullPublish.py
|
py
| 2,400 |
python
|
en
|
code
| 3 |
github-code
|
6
|
15768547417
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Type
from django.db.models import JSONField
from django.db.models.lookups import Lookup
from pymilvus.client.types import DataType
from .lookups import get_nearest_n
if TYPE_CHECKING:
from django_milvus.connection import Connection
class MilvusField(JSONField):
def __init__(
self,
dim: int,
dtype: DataType,
*args: Any,
dbname: str = "default",
nlist: int = 1024,
nprobe: int = 32,
metric_type: str = "L2",
index_type: str = "IVF_FLAT",
**kwargs: Any,
) -> None:
self.dim = dim
self.dtype = dtype
self.dbname = dbname
self.nlist = nlist
self.nprobe = nprobe
self.metric_type = metric_type
self.index_type = index_type
super().__init__(*args, **kwargs)
def get_connection_class(self) -> Type["Connection"]:
from .connection import Connection
return Connection
def get_connection(self) -> Connection:
return self.get_connection_class()(self.dbname)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs.update(
{
"dim": self.dim,
"dtype": self.dtype,
"dbname": self.dbname,
"nlist": self.nlist,
"nprobe": self.nprobe,
"metric_type": self.metric_type,
"index_type": self.index_type,
}
)
return name, path, args, kwargs
def get_lookup(self, lookup_name: str) -> Type[Lookup] | None:
if lookup_name.startswith("nearest"):
try:
return get_nearest_n(
int(lookup_name[8:]),
self.model,
self,
self.get_connection(),
)
except ValueError:
                raise ValueError(
                    f"incorrect syntax when looking up nearby vectors: use nearest_{{int}}. got {lookup_name}"
                )
else:
raise ValueError("Not supported lookup: " + lookup_name)
|
kaleido-public/django-milvus
|
django_milvus/fields.py
|
fields.py
|
py
| 2,190 |
python
|
en
|
code
| 4 |
github-code
|
6
|
73925325948
|
CUSTOMERS = [
{
"id": 1,
"name": "Ryan Tanay"
},
{
"id": 2,
"name": "Keeley Jones"
},
{
"id": 3,
"name": "Summer Smith"
}
]
def get_all_customers():
return CUSTOMERS
# Function with a single parameter
def get_single_customer(id):
# Variable to hold the found customer, if it exists
requested_customer = None
# Iterate the customerS list above. Very similar to the
# for..of loops you used in JavaScript.
for customer in CUSTOMERS:
# Dictionaries in Python use [] notation to find a key
# instead of the dot notation that JavaScript used.
if customer["id"] == id:
requested_customer = customer
return requested_customer
def create_customer(customer):
# Get the id value of the last customer in the list
max_id = CUSTOMERS[-1]["id"]
# Add 1 to whatever that number is
new_id = max_id + 1
# Add an `id` property to the customer dictionary
customer["id"] = new_id
# Add the customer dictionary to the list
CUSTOMERS.append(customer)
# Return the dictionary with `id` property added
return customer
def delete_customer(id):
customer_index = -1
for index, customer in enumerate(CUSTOMERS):
if customer["id"] == id:
customer_index = index
if customer_index >= 0:
CUSTOMERS.pop(customer_index)
def update_customer(id, new_customer):
for index, customer in enumerate(CUSTOMERS):
if customer["id"] == id:
CUSTOMERS[index] = new_customer
break
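
# Hedged usage sketch (not part of the original module), exercising the helpers
# above against the in-memory CUSTOMERS list:
if __name__ == "__main__":
    new = create_customer({"name": "Jess Day"})        # id becomes 4
    update_customer(new["id"], {"id": new["id"], "name": "Jessica Day"})
    print(get_single_customer(new["id"]))
    delete_customer(new["id"])
    print(len(get_all_customers()))                    # back to 3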
|
kellyfrancoeur/kennel-server
|
views/customer_requests.py
|
customer_requests.py
|
py
| 1,601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12731720405
|
#/usr/bin/python3.8
"""
This example implements the interaction between Qt Widgets and a 2D
matplotlib plot showing a gaussian curve with scipy.
This app displays a graph inside gui
"""
import sys
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT
from PySide6.QtCore import Qt, Slot
from PySide6.QtWidgets import (QApplication, QWidget, QDoubleSpinBox, QVBoxLayout, QHBoxLayout)
class PlotWidget(QWidget):
def __init__(self,parent=None):
super().__init__(parent)
#create widgets
self.view = FigureCanvas(Figure(figsize=(5,3)))
self.axes = self.view.figure.subplots()
self.toolbar = NavigationToolbar2QT(self.view,self)
self.avg_input = QDoubleSpinBox()
self.std_input = QDoubleSpinBox()
self.avg_input.setPrefix("μ: ")
self.std_input.setPrefix("σ: ")
self.std_input.setValue(10)
#create layout
input_layout = QHBoxLayout() #widgets are aligned horiz
input_layout.addWidget(self.avg_input)
input_layout.addWidget(self.std_input)
vlayout = QVBoxLayout()
vlayout.addWidget(self.toolbar)
vlayout.addWidget(self.view)
        vlayout.addLayout(input_layout)  # input_layout is a layout (not a widget) and a local variable
self.setLayout(vlayout)
#connect input with a func
self.avg_input.valueChanged.connect(self.on_change)
self.std_input.valueChanged.connect(self.on_change)
#Exec on_change func
self.on_change()
@Slot() #connect to this func
def on_change(self):
# Update plot with input values
avg = self.avg_input.value() #get data from spinbox
std = self.std_input.value()
dx = np.linspace(-100,100)
        dy = norm.pdf(dx, avg, std)
self.axes.clear()
self.axes.plot(dx,dy)
self.view.draw()
if __name__ == "__main__":
app = QApplication(sys.argv)
wPlot = PlotWidget()
wPlot.show()
sys.exit(app.exec())
|
ndlopez/learn_python
|
learn_qt/qt_graph.py
|
qt_graph.py
|
py
| 2,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70504080829
|
from naman.core.models import VLan
from django.core.exceptions import ImproperlyConfigured
def assign_provisioning_vlan(machine):
print("Entering assign_provisioning_vlan")
prov_vlans = VLan.objects.filter(provisioning_purpose=True)
if prov_vlans.count() == 0:
raise ImproperlyConfigured("Missing provisioning vlans")
for vlan in prov_vlans:
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any provisioning vlan")
def assign_backup_vlan(machine):
#logging.basicConfig(level=logging.DEBUG)
print("Entering assign_backup_vlan")
for vlan in machine.environment.backup_vlans.all().order_by('name'):
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any backup vlan")
def assign_management_vlan(machine):
print("Entering management vlan")
man_vlans = VLan.objects.filter(management_purpose=True).order_by("name")
if man_vlans.count() == 0:
raise ImproperlyConfigured("Missing management vlans")
for vlan in man_vlans:
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any management vlan")
def assign_dmz_based_on_project(machine):
print("Entering dmz based on project vlan")
    #if machine.dmz_located:
    project = machine.project
    if project is None or project.dmz is None:
        raise ImproperlyConfigured(
            "DMZ located machine must belong to a project which has a dmz vlan assigned")
    machine.get_vlanconfig().append_vlan(project.dmz)
def assign_service_vlan_based_on_project(machine):
print("Entering service vlan based on project")
project = machine.project
for vlan in project.service_vlans.all().order_by('name'):
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
def assign_general_purpose_service_vlan(machine):
print("General purpose service vlan")
for vlan in machine.environment.service_vlans.all().order_by('-name'):
#print "trying service vlan with: %s" % vlan
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
#raise VLan.NoFreeIPError("Can't assign free IP for service vlan")
mappings = [
assign_backup_vlan,
assign_management_vlan,
assign_provisioning_vlan,
assign_dmz_based_on_project,
assign_service_vlan_based_on_project,
assign_general_purpose_service_vlan,
]
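
# Hedged usage sketch (not part of the original module): the mappings list is a
# simple pipeline of side-effecting assignment functions, so applying it to a
# machine is just iterating over it and tolerating vlans with no free IPs.
# `machine` is assumed to be a naman core model instance with a project,
# environment and vlan configuration.
def apply_mappings(machine):
    for assign in mappings:
        try:
            assign(machine)
        except (VLan.NoFreeIPError, ImproperlyConfigured) as exc:
            print("skipped %s: %s" % (assign.__name__, exc))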
|
jpardobl/naman
|
naman/core/mappings/vlan_actions.py
|
vlan_actions.py
|
py
| 2,859 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36385272432
|
"""
create some fake json book cover records from the book review dataset from kaggle.
it's a very basic implementation for development - too clean for testing.
"""
import pandas as pd
# read the relevant fields into memory
df = pd.read_json('book_list.json') # type:pd.DataFrame
# remove unneeded cols
df = df.drop('version', axis=1)
df = df.drop('revisionDate', axis=1)
df = df.drop('publishedDate', axis=1)
df = df.drop('tagLine', axis=1)
df = df.drop('title', axis=1)
df = df.drop('coverUrl', axis=1)
# add chapterId col
df['chapterId'] = df['bookId']
# reorder cols
cols = df.columns.tolist()
cols[0], cols[-1] = cols[-1], cols[0]
cols[1], cols[-1] = cols[-1], cols[1]
df = df[cols]
# add chapter numbers
df['chapterNum'] = pd.Series(0, index=df.index)
# convert pandas dataframe to json and write to file
json = df.to_json(path_or_buf='chapter_list.json', orient='records')
|
didactapp/didact-fake-json-data-generator
|
fake_chapters.py
|
fake_chapters.py
|
py
| 896 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34045997209
|
from faster_rcnn.config import cfg, get_output_dir
import argparse
from utils_py3.timer import Timer
import numpy as np
import cv2
from utils_py3.cython_nms import nms
# from utils_py3.boxes_grid import get_boxes_grid
import pickle
# import heapq
from utils_py3.blob_helper import im_list_to_blob
import os
import math
import tensorflow as tf
from faster_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import matplotlib.pyplot as plt
from tensorflow.python.client import timeline
import time
import pdb
def _get_image_blob(im):
"""
Convert an image into a network input.
Argument :
im(ndarray): a color image in BGR order
Returns :
blob(ndarray): a data blob holding an image pyramid
im_scales_factors(list):list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
print("im_shape is:",im_shape)
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
print("min and max:",im_size_min,im_size_max)
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
# def _project_im_rois(im_rois, scales):
# """Project image RoIs into the image pyramid built by _get_image_blob.
# Arguments:
# im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
# scales (list): scale factors as returned by _get_image_blob
# Returns:
# rois (ndarray): R x 4 matrix of projected RoI coordinates
# levels (list): image pyramid levels used by each projected RoI
# """
# im_rois = im_rois.astype(np.float, copy=False)
# scales = np.array(scales)
#
# if len(scales) > 1:
# widths = im_rois[:, 2] - im_rois[:, 0] + 1
# heights = im_rois[:, 3] - im_rois[:, 1] + 1
#
# areas = widths * heights
# scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
# diff_areas = np.abs(scaled_areas - 224 * 224)
# levels = diff_areas.argmin(axis=1)[:, np.newaxis]
# else:
# levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
#
# rois = im_rois * scales[levels]
#
# return rois, levels
def _get_blobs(im):
"""
Convert an image and RoIs within that image into inputs
"""
blobs = {'data': None, 'rois': None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries
"""
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _recales_boxes(boxes, inds, scales):
"""
Rescale boxes according to image rescaling
"""
for i in range(boxes.shape[0]):
boxes[i, :] = boxes[i, :] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im, boxes=None):
"""
Detect object classes in an image given object proposals
Arguments:
net: faster rcnn network to use
im: color image to test(in BGR order)
boxes(ndarray): R X 4 array of object proposals
Returns:
scores(ndarray): R X K array of object class scores(K includes
background as object category 0)
boxes(ndarray): R X (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im)
im_blob = blobs['data']
blobs['im_info'] = np.array([[im_blob.shape[1],
im_blob.shape[2],
im_scales[0]]], dtype=np.float32)
print(blobs['im_info'])
feed_dict = {net.data: blobs['data'], net.im_info: blobs['im_info'],
net.keep_prob: 1.0}
run_options = None
run_metadata = None
if cfg.TEST.DEBUG_TIMELINE:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# pdb.set_trace()
cls_score, cls_prob, bbox_pred, rois = sess.run(
[net.get_output('cls_score'), net.get_output('cls_prob'), net.get_output('bbox_pred'),
net.get_output('rois')], feed_dict = feed_dict, options=run_options,run_metadata=run_metadata
)
assert len(im_scales) == 1, "Only single-image batch implemented"
boxes = rois[:, 1:5] / im_scales[0]
scores = cls_prob
if cfg.TEST.BBOX_REG:
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
if cfg.TEST.DEBUG_TIMELINE:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        trace_file = open(str(int(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')  # int() replaces Python 2's long()
trace_file.write(trace.generate_chrome_trace_format(show_memory=False))
trace_file.close()
return scores,pred_boxes
def vis_detections(im, class_name, dets, thresh=0.8):
"""
Visual debugging of detections
"""
import matplotlib.pyplot as plt
for i in range(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.title('{} {:.3f}'.format(class_name, score))
# def apply_nms(all_boxes, thresh):
def test_net(sess, net, imdb, weights_filename, max_per_image=300,
thresh=0.05, vis=False):
"""
Test a faster rcnn network on an image database
"""
num_images = len(imdb.image_index)
# pdb.set_trace()
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in range(2):
box_proposals = None
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im, box_proposals)
_t['im_detect'].toc()
# pdb.set_trace()
_t['misc'].tic()
if vis:
image = im[:, :, (2, 1, 0)]
plt.cla()
plt.imshow(image)
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
print(cls_dets)
if vis:
vis_detections(image, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
if vis:
plt.show()
# Limit to max_per_image detections * over all classes *
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
# print ('Evaluating detections')
# imdb.evaluate_detections(all_boxes, output_dir)
|
hx121071/faster-rcnn-tf-py3
|
lib/faster_rcnn/test.py
|
test.py
|
py
| 8,840 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16257468366
|
import re
import pytest
from morphocut import Pipeline
from morphocut.file import Find, Glob
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
def test_Find(data_path, sort, verbose, capsys):
d = data_path / "images"
with Pipeline() as pipeline:
filename = Find(d, [".png"], sort, verbose)
stream = pipeline.transform_stream()
filenames = [o[filename] for o in stream]
if sort:
assert filenames == sorted(filenames)
if verbose:
out = capsys.readouterr().out
assert re.search(r"^Found \d+ files in .+\.$", out)
def test_Glob(data_path):
d = data_path / "images/*.png"
with Pipeline() as pipeline:
result = Glob(d, True)
pipeline.run()
|
morphocut/morphocut
|
tests/test_file.py
|
test_file.py
|
py
| 770 |
python
|
en
|
code
| 7 |
github-code
|
6
|
71316404667
|
import requests
from bs4 import BeautifulSoup
import json
def get_pinned(github_user):
URL = f"https://github.com/{github_user}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
pinned_data = soup.find_all("div", {"class": "pinned-item-list-item-content"})
pinned_posts = []
for post in pinned_data:
pinned_posts.append(post.find("a")["href"])
return pinned_posts
def get_projects(github_user, query):
URL = f"https://github.com/{github_user}?tab=repositories&q={query}&type=source"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
projects = soup.body.find("ul", {"data-filterable-for": "your-repos-filter"})
if not projects:
return []
projects = projects.find_all("li")
projects_parsed = []
for project in projects:
project_data = {}
title = project.find("h3").a
project_data["name"] = title.text.strip().replace("-", " ").capitalize()
project_data["link"] = title["href"]
project_data["tags"] = [query]
impact = project.find("div", class_="f6 color-text-secondary mt-2")
if impact:
impact = impact.find_all("a")
for data in impact:
project_data[data["href"].split("/")[-1]] = int(data.text.strip())
if "stargazers" not in project_data:
project_data["stargazers"] = 0
if "members" not in project_data:
project_data["members"] = 0
project_data["score"] = project_data["stargazers"] + project_data["members"] * 5
else:
project_data["score"] = 0
projects_parsed.append(project_data)
return projects_parsed
def get_youtube_data(youtube_username):
initial_data = "var ytInitialData = "
final_data = ";"
url = f"https://www.youtube.com/{youtube_username}/videos"
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
scripts = soup.body.find_all("script")
videos_data = []
for script in scripts:
data = script.encode_contents().decode(errors="replace")
if initial_data not in data:
continue
data = data.replace(initial_data, "").replace(final_data, "")
tab_renderers = json.loads(data)["contents"]
tab_renderers = tab_renderers["twoColumnBrowseResultsRenderer"]["tabs"]
for tab in tab_renderers:
if "tabRenderer" not in tab:
continue
if tab["tabRenderer"]["title"] != "Videos":
continue
videos = tab["tabRenderer"]["content"]["sectionListRenderer"]
videos = videos["contents"][0]["itemSectionRenderer"]
videos = videos["contents"][0]["gridRenderer"]["items"]
for video in videos:
if "gridVideoRenderer" not in video:
continue
video = video["gridVideoRenderer"]
published = ""
if "publishedTimeText" in video:
published = video["publishedTimeText"]["simpleText"]
view_count_text = ""
if "simpleText" in video["viewCountText"]:
view_count_text = video["viewCountText"]["simpleText"]
video_data = {
"thumbnail": video["thumbnail"]["thumbnails"][-1]["url"],
"title": video["title"]["runs"][0]["text"],
"published": published,
"viewCountText": view_count_text,
"url": f"https://www.youtube.com/watch?v={video['videoId']}",
}
videos_data.append(video_data)
return videos_data
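
# Hedged usage sketch (not part of the original file): the three scrapers are
# independent, so a minimal driver just calls them directly. The usernames and
# channel handle below are placeholders.
if __name__ == "__main__":
    print(get_pinned("octocat"))
    print(get_projects("octocat", "python"))
    print(get_youtube_data("@github"))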
|
HectorPulido/HectorPulido
|
ReadmeGenerator/scraper.py
|
scraper.py
|
py
| 3,744 |
python
|
en
|
code
| 10 |
github-code
|
6
|
15306370520
|
""" File: eulerCharacteristics.py
Description: calculates the characteristics of the 2D Euler equation.
This includes the flux and the eigenvectors associated with it
Author: Pierre-Yves Taunay
Date: November 2018
"""
import numpy as np
from utils import P_from_Ev
GAM = 1.4
def compute_euler_flux(U,direction):
rho = U[:,0]
u = U[:,1] / rho
v = U[:,2] / rho
E = U[:,3] / rho
P = P_from_Ev(E,rho,u,v)
flx = np.zeros(U.shape)
if direction == 'dx':
flx[:,0] = rho*u
flx[:,1] = rho*u**2 + P
flx[:,2] = rho*u*v
flx[:,3] = rho*E*u + P*u
elif direction == 'dy':
flx[:,0] = rho*v
flx[:,1] = rho*u*v
flx[:,2] = rho*v**2 + P
flx[:,3] = rho*E*v + P*v
return flx
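
# For reference (added annotation, not part of the original file): in the
# x-direction the function above assembles the standard 2D Euler flux
#   F(U) = (rho*u, rho*u**2 + P, rho*u*v, u*(rho*E + P)),
# and the y-direction flux is the same expression with the roles of u and v
# swapped.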
def eigenvector_x(u,v,a,q,h,nunk):
Rj = np.zeros((nunk,nunk))
Lj = np.zeros((nunk,nunk))
# Right eigenvector
Rj[0,:] = np.ones((1,nunk))
Rj[0,-1] = 0
Rj[1,0] = u - a
Rj[1,1] = u
Rj[1,2] = u + a
Rj[1,3] = 0
Rj[2,0:3] = v
Rj[2,3] = -1
Rj[3,0] = h - a * u
Rj[3,1] = q
Rj[3,2] = h + a * u
Rj[3,3] = -v
# Left eigenvector
Lj[0,0] = (GAM-1)*q + a*u
Lj[0,1] = (1-GAM)*u - a
Lj[0,2] = (1-GAM)*v
Lj[0,3] = (GAM-1)
Lj[0,:] /= (2*a**2)
Lj[1,0] = a**2 - (GAM-1)*q
Lj[1,1] = (GAM-1)*u
Lj[1,2] = (GAM-1)*v
Lj[1,3] = (1-GAM)
Lj[1,:] /= a**2
Lj[2,0] = (GAM-1)*q - a*u
Lj[2,1] = (1-GAM)*u + a
Lj[2,2] = (1-GAM)*v
Lj[2,3] = (GAM-1)
Lj[2,:] /= (2*a**2)
Lj[3,0] = v
Lj[3,2] = -1
return Rj, Lj
def eigenvector_y(u,v,a,q,h,nunk):
Rj = np.zeros((nunk,nunk))
Lj = np.zeros((nunk,nunk))
# Right eigenvector
Rj[0,:] = np.ones((1,nunk))
Rj[0,-1] = 0
Rj[1,0:3] = u
Rj[1,3] = 1
Rj[2,0] = v - a
Rj[2,1] = v
Rj[2,2] = v + a
Rj[2,3] = 0
Rj[3,0] = h - a * v
Rj[3,1] = q
Rj[3,2] = h + a * v
Rj[3,3] = u
# Left eigenvector
Lj[0,0] = (GAM-1)*q + a*v
Lj[0,1] = (1-GAM)*u
Lj[0,2] = (1-GAM)*v - a
Lj[0,3] = (GAM-1)
Lj[0,:] /= (2*a**2)
Lj[1,0] = a**2 - (GAM-1)*q
Lj[1,1] = (GAM-1)*u
Lj[1,2] = (GAM-1)*v
Lj[1,3] = (1-GAM)
Lj[1,:] /= a**2
Lj[2,0] = (GAM-1)*q - a*v
Lj[2,1] = (1-GAM)*u
Lj[2,2] = (1-GAM)*v + a
Lj[2,3] = (GAM-1)
Lj[2,:] /= (2*a**2)
Lj[3,0] = -u
Lj[3,1] = 1
return Rj, Lj
def compute_eigenvector(U,U0,direction):
rho = U[:,0]
u = U[:,1] / rho
v = U[:,2] / rho
E = U[:,3] / rho
P = P_from_Ev(E,rho,u,v)
nunk = U.shape[1]
nelem = U.shape[0]
a = np.sqrt(GAM*P/rho)
q = 1/2*(u**2 + v**2) # Dynamic pressure
h = a**2/(GAM-1) + q # Enthalpy
rho0 = U0[:,0]
u0 = U0[:,1] / rho0
v0 = U0[:,2] / rho0
E0 = U0[:,3] / rho0
P0 = P_from_Ev(E0,rho0,u0,v0)
nunk = U0.shape[1]
nelem = U0.shape[0]
a0 = np.sqrt(GAM*P0/rho0)
q0 = 1/2*(u0**2 + v0**2) # Dynamic pressure
h0 = a0**2/(GAM-1) + q0 # Enthalpy
Rjlist = []
Ljlist = []
if direction == 'dx':
Rlhs0, Llhs0 = eigenvector_x(u0[0],v0[0],a0[0],q0[0],h0[0],nunk)
for idx in range(nelem):
Rj, Lj = eigenvector_x(u[idx],v[idx],a[idx],q[idx],h[idx],nunk)
Rjlist.append(Rj)
Ljlist.append(Lj)
Rlhs0pre = None
Llhs0pre = None
elif direction == 'dy':
# For the y-direction, the bottom boundary can either be pre or post-shock
Rlhs0, Llhs0 = eigenvector_y(u0[0],v0[0],a0[0],q0[0],h0[0],nunk)
Rlhs0pre, Llhs0pre = eigenvector_y(u0[-1],v0[-1],a0[-1],q0[-1],h0[-1],nunk)
for idx in range(nelem):
Rj, Lj = eigenvector_y(u[idx],v[idx],a[idx],q[idx],h[idx],nunk)
Rjlist.append(Rj)
Ljlist.append(Lj)
Rj = Rjlist
Lj = Ljlist
return Rj,Lj,Rlhs0,Llhs0,Rlhs0pre,Llhs0pre
def to_characteristics(U,flx,U0,flx0,order,Lh,alpha,Nx,Ny,direction,options,tc,lambda_calc_char):
nelem = U.shape[0]
nunk = U.shape[1]
# Matrix holders
V = np.zeros((nelem,order+1,nunk))
VLF = np.zeros((nelem,order+1,nunk))
H = np.zeros((nelem,order+1,nunk))
# For all elements, evaluate R_{i+1/2}^-1 * [STENCIL]
# The conditional work for r = 2 (order 5)
# We do the characteristics calculation for all elements for the whole stencil
V,H,VLF = lambda_calc_char(U,flx,U0,flx0,order,Lh,alpha,direction,tc)
return V,H,VLF
|
pytaunay/weno-tests
|
python/euler_2d/eulerCharacteristics.py
|
eulerCharacteristics.py
|
py
| 4,634 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42557211122
|
import numpy
from PIL import Image
def histogram(img):
image_gray = img.convert('L')
killy = numpy.array(image_gray)
maximum = numpy.max(killy)
minimum = numpy.min(killy)
dim = maximum - minimum + 1
hist, bins = numpy.histogram(killy, bins=dim)
return hist
image1 = Image.open("image1.jpg")
hist1 = histogram(image1)
image2 = Image.open("image2.jpg")
hist2 = histogram(image2)
image3 = Image.open("image3.jpg")
hist3 = histogram(image3)
image4 = Image.open("image4.jpg")
hist4 = histogram(image4)
image5 = Image.open("image5.jpg")
hist5 = histogram(image5)
image6 = Image.open("image6.jpg")
hist6 = histogram(image6)
image7 = Image.open("image7.jpg")
hist7 = histogram(image7)
image8 = Image.open("image8.jpg")
hist8 = histogram(image8)
image9 = Image.open("image9.jpg")
hist9 = histogram(image9)
image10 = Image.open("image10.jpg")
hist10 = histogram(image10)
def calcul_distance(h1, h2):
size1 = len(h1)
size2 = len(h2)
somme = 0
somme2 = 0
if size2 > size1:
for i in range(size2):
if i < size1:
somme = somme + (min(h1[i], h2[i]))
else:
somme = somme + h2[i]
else:
for i in range(size1):
if i < size2:
somme = somme + (min(h1[i], h2[i]))
else:
somme = somme + h1[i]
for i in range(size1):
somme2 = somme2 + h1[i]
distance = 1 - somme / somme2
return distance
distance1 = calcul_distance(hist1, hist2)
distance2 = calcul_distance(hist1, hist3)
distance3 = calcul_distance(hist1, hist4)
distance4 = calcul_distance(hist1, hist5)
distance5 = calcul_distance(hist1, hist6)
distance6 = calcul_distance(hist1, hist7)
distance7 = calcul_distance(hist1, hist8)
distance8 = calcul_distance(hist1, hist9)
distance9 = calcul_distance(hist1, hist10)
dictionary = {"image2": distance1, "image3": distance2, "image4": distance3, "image5": distance4,
"image6": distance5, "image7": distance6, "image8": distance7, "image9": distance8,
"image10": distance9}
print("Order of similarity used the request image <<image1>> : ")
for w in sorted(dictionary, key=lambda x: dictionary[x] if x in dictionary else None, reverse=False):
print(w, dictionary[w])
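
# Hedged alternative sketch (not part of the original script): calcul_distance()
# sums the bin-wise minimum over the common bins, adds the tail of the longer
# histogram, and normalises by sum(h1). The same value can be computed with
# numpy as follows.
def calcul_distance_np(h1, h2):
    a = numpy.asarray(h1, dtype=float)
    b = numpy.asarray(h2, dtype=float)
    n = min(len(a), len(b))
    longer = a if len(a) >= len(b) else b
    somme = numpy.minimum(a[:n], b[:n]).sum() + longer[n:].sum()
    return 1 - somme / a.sum()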
|
jouhaina-nasri/Project-Indexation
|
TP Indexation/Histogramme/app.py
|
app.py
|
py
| 2,290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22215955629
|
import random
import os
def get_answer() -> bool:
"""
functions gets an answer which decides if
some actions will be done
:return:
"""
x = input(str("press y for yes or n for no:"))
while x != "y" and x != "n":
print("you have entered a wrong answer")
x = input(str("press y for yes or n for no:"))
return x == "y"
def get_color_biome(biome):
"""
:param biome:
:return:
"""
color = ""
obstacles = ""
if biome == "grass":
color = "\033[2;32;42m"
obstacles = ["🌳", "🌲", "🌾"]
elif biome == "snow":
color = "\033[2;32;47m"
obstacles = ["🎄", "🎁", "⛄"]
elif biome == "dessert":
color = "\033[2;32;43m"
obstacles = ["🏜", "🌴", "🏵"]
elif biome == "hell":
color = "\033[2;32;41m"
obstacles = ["🌋", "🔥", "📍"]
elif biome == "unicorn":
color = "\033[2;32;45m"
obstacles = ["🌈", "🍧", "🌺", "🦄"]
return color, obstacles
def get_correct_int_input(message) -> int:
"""
:return:
"""
while True:
try:
age = int(input(message))
except ValueError:
print(f'error input must be written with digits')
else:
break
return age
def generate_room_length_height():
"""
:return:
"""
length = random.choice(range(8, 12))
height = random.choice(range(6, 9))
return length, height
def generate_rooms(biome: str, length: int, height: int) -> list:
"""
"🧙" +🌳🌲🌾⛄🎁🎄
:return:
"""
output = []
color, obstacles = get_color_biome(biome)
output.append((" " * (length + 2)))
for i in range(height):
obstacle_column = [random.choice(range(length)) for _ in range(random.choice(range(1, 3)))]
obstacle_column.sort()
obstacle_column.append(" ")
# print(obstacle_column)
# print(obstacle_column[0])
obstacle_count = 0
row = ""
for i_1 in range(length):
if i_1 == obstacle_column[obstacle_count]:
row += f"{random.choice(obstacles)}"
obstacle_count += 1
else:
row += f" "
output.append(row)
output.append((" " * (length + 2)))
return output
def print_room(room: list, biome: str):
"""
:param room:
:param biome:
:return:
"""
print(f"\033[2;32;40m{room[0]}\033[0;0m")
color, obstacles = get_color_biome(biome)
margins = '\033[2;32;40m '
for item in room[1:-1]:
print(f"{margins}{color}{item}{margins}\033[0;0m")
print(f"\033[2;32;40m{room[-1]}\033[0;0m")
def get_random_biome() -> str:
"""
:return:
"""
return random.choice(["grass", "snow", "dessert", "hell", "unicorn"])
def add_object_on_map(room: list, hero: str, column: int, row: int):
"""
:param hero:
:param room:
:param column:
:param row:
:return:
"""
column = column - 1
output = []
count_row_positions = 0
count_row_positions_actions = 0
last_ch = 0
space_len = 1
for i, item in enumerate(room):
if i == row:
if column == 0:
if item[0] == " ":
output.append(f"{hero}{item[2::]}")
else:
output.append(f"{hero}{item[1::]}")
else:
for i, ch in enumerate(item):
if ch == " ":
count_row_positions += 1
count_row_positions_actions += 1
elif ch != " ":
count_row_positions += 1
count_row_positions_actions += 2
last_ch = i
if count_row_positions_actions >= column * 2:
break
try:
if item[i + 1] == " ":
space_len = 2
except IndexError:
space_len = 1
output.append(f"{item[0:count_row_positions]}{hero}{item[count_row_positions + space_len::]}")
else:
output.append(item)
return output
def choose_door(biome: str):
"""
:param biome:
:return:
"""
dor = ""
if biome == "grass":
dor = "⛩"
elif biome == "snow":
dor = "🏛"
elif biome == "dessert":
dor = "🛕"
elif biome == "hell":
dor = "⛩"
elif biome == "unicorn":
dor = "🏛"
return dor
def choose_enemy():
"""
:return:
"""
return random.choice(['💀', '👺', '👹', '🕷', '🦂', '👻', '👽', '😈', '🌬', '🎃', '👾', '🐲', "🎭"])
def enemy_name(how_many, symbol):
pass
def get_hero_name() -> str:
"""
If the username doesn't have unknown characters it returns the name
:return:
"""
main_path = os.getcwd()
os.chdir("Your_characters")
while True:
username = input(str("Enter your username:"))
if len(username) >= 4 and f"{username}.txt" not in os.listdir():
x = 0
for ch in username:
if ch in '!"#$%&\'*()+,/:;<=>?@[\\]^`{|}~':
x = 1
if x == 0:
break
else:
print("Error: username cannot contain any of these characters !\"#$%&\'*+,/:;<=>?@[\\]^`{|}~")
else:
if len(username) < 4:
print("Error: username must have at least 4 characters")
else:
print("username already used")
os.chdir(main_path)
return username
def write_hero_data(hero_name: str, hero_class: str, level: str,
gold: str, materials: dict, gear: dict, inventory: list, stats):
"""
:return:
"""
main_path = os.getcwd()
new_data = [hero_class, level, gold]
for k, v in materials.items():
new_data.append(f"{v}")
for k_1, v_1 in gear.items():
new_data.append(f"{v_1}")
for item in inventory:
new_data.append(item)
for item_1 in stats:
new_data.append(item_1)
os.chdir("Your_characters")
with open(f"{hero_name}.txt", "w") as fw:
for item in new_data:
fw.write(item + "\n")
os.chdir(main_path)
def read_abilities(selected_hero: str):
"""
:param selected_hero:
:return:
"""
main_path = os.getcwd()
os.chdir("heroes")
for item in os.listdir():
if item[0:4] == selected_hero[0:4]:
with open(item, "r") as fr:
content = fr.readlines()
os.chdir(main_path)
output = [item.strip() for item in content]
for item in output:
print(item)
def create_hero():
"""
:return:
"""
name = ""
main_while_break = 0
print("")
print(
"Choose a class for your hero:\nWizard(🧙)\nShadow(👤)\nVampire(🧛)\nfairy.txt(🧚)\nTriton(🧜)\nSpirit(🧞)")
hero_class = str.lower(input("Choose one option by writing the class name:"))
classes = ["wizard", "shadow", "fairy"] # ["wizard", "shadow", "vampire", "fairy", "triton", "spirit"]
classes_stats = {"wizard": [40, 850, 45, 200, 0, 0, 3, 5, 0, 0, 70], "fairy": [32, 1050, 40, 175, 15, 50, 7, 7, 0, 15, 60]}
while True:
while hero_class not in classes:
print("incorrect class, try again")
hero_class = str.lower(input("Choose one option by writing the class name:"))
with open("hero_class_description", "r") as fr:
content = fr.readlines()
for item in content:
if str.lower(item[0:4]) == str.lower(hero_class[0:4]):
print(f"\n{item}\n")
print("Choose one option:\ngo back(b)\nchoose class(c)\nread abilities(a)")
option = str.lower(input("Your option:"))
while option:
if option not in ["c", "b", "a"]:
print("option is incorrect")
elif option == "a":
read_abilities(hero_class)
elif option == "c":
items_0 = {"helm": "empty_", "chest": "empty_", "boots": "empty_", "weapon": "empty_",
"sec_weapon": "empty_", "jewel": "empty_"}
materials_0 = {"wood": '0', "iron": '0', "diamond": '0', "angelic dust": '0'}
name = get_hero_name()
write_hero_data(name, hero_class, "0", "500", materials_0, items_0, ["empty_"] * 18)
main_while_break = 1
break
else:
break
print("\nChoose one option:\ngo back(b)\nchoose class(c)\nread abilities(a)")
option = str.lower(input("Your option:"))
if main_while_break == 1:
break
else:
print("")
print("Choose a class for your hero:\nWizard(🧙)\nShadow(👤)\nVampire(🧛)"
"\nfairy.txt(🧚)\nTriton(🧜)\nSpirit(🧞)")
hero_class = str.lower(input("Choose one option by writing the class name:"))
classes = ["wizard", "shadow", "vampire", "fairy", "triton", "spirit"]
return f"{name}.txt"
def get_hero_symbol(hero_name):
"""
:param hero_name:
:return:
"""
output = ""
changed_directory = 0
main_path = os.getcwd()
if main_path != "C:\\Users\\andre\\PycharmProjects\\The-Big-Book-of-Small-Python-Projects-solved-by-ramuica-\\Angeilic Powers\\Your_characters":
os.chdir("Your_characters")
changed_directory = 1
for item in os.listdir():
if item == f"{hero_name}.txt":
with open(item, "r") as fr:
content = fr.readlines()
hero_and_class = {"wizard": "🧙", "shadow": "👤", "vampire": "🧛", "fairy": "🧚", "triton": "🧜", "spirit": "🧞"}
for k, v in hero_and_class.items():
if k == content[0].strip():
output = v
if changed_directory == 1:
os.chdir(main_path)
return output
def get_item_symbol(item):
"""
:param item:
:return:
"""
output = ""
dict_items = {"crown": "👑", "wand": "🎆",
"the_eye": "🧿", "robe": "👘",
"talisman": "🎐", "boots": "👢"}
for k, v in dict_items.items():
if item == k:
output = v
break
return output
def play_existing_hero():
"""
:return:
"""
main_path = os.getcwd()
os.chdir("Your_characters")
while True:
print("\nChoose what hero do you want to play:")
for i, item in enumerate(os.listdir()):
symbol = get_hero_symbol(item[0:-4])
print(f"{item[0:-4]}{symbol}({i + 1})")
option = get_correct_int_input("\nChoose a hero by writing his corresponding number: ")
if 0 < option <= len(os.listdir()):
output = os.listdir()[option - 1]
break
else:
print("the option you choose doesn't correspond to any hero")
os.chdir(main_path)
return output
def decide_what_hero_to_play():
"""
:return:
"""
while True:
print("Do you want to create a new hero or to play an existing one?\nCreate(c)\nChoose existing hero(e)")
option = input("\nChoose an option by writing his corresponding letter: ")
if option == "e":
return play_existing_hero()
elif option == "c":
return create_hero()
else:
print("\nthe letter you choose doesn't correspond to any option. Try again\n")
def read_inventory(file):
"""
:param file:
:return:
"""
main_path = os.getcwd()
os.chdir("Your_characters")
with open(file, "r") as fr:
content = fr.readlines()
os.chdir(main_path)
inventory = [item.strip() for item in content]
return inventory[0:31]
def get_random_rarity(difficulty):
"""
:param difficulty:
:return:
"""
red_item = 10 * difficulty
    green_item = 50 * difficulty
    yellow_item = 500 * difficulty
    blue_item = 1300 * difficulty
    black_chance = 10000 - blue_item - yellow_item - green_item - red_item
    list_chances = ["common"] * (black_chance) + ["normal"] * (blue_item) + ["rare"] * \
                   (yellow_item) + ["legendary"] * (green_item) + ["angelic"] * (red_item)
return random.choice(list_chances)
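
# Hedged worked example (added annotation, not in the original file): with
# difficulty=1 the weights above are 10 angelic, 50 legendary, 500 rare,
# 1300 normal and 8140 common out of 10000 draws, i.e. roughly 0.1%, 0.5%,
# 5%, 13% and 81.4% respectively.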
def get_rarity_color(rarity):
if rarity == "common":
return "\033[40m", "\033[30m"
elif rarity == "normal":
return "\033[44m", "\033[34m"
elif rarity == "rare":
return "\033[43m", "\033[33m"
elif rarity == "legendary":
return "\033[42m", "\033[32m"
elif rarity == "angelic":
return "\033[41m", "\033[31m"
def print_inventory(file):
"""
:param file:
:return:
"""
symbol = get_hero_symbol(file[0:-4])
inventory = read_inventory(file)
print(f"\033[4m{' ' * 30}\033[0;0m ")
print(f"|\033[95m{file[0:-4]} lv: {inventory[1]}\033[0;0m "
f"{' ' * (21 - len(file[0:-4]) - len(str(inventory[1])))}|")
print(f"| \033[1;30;45m|{symbol}|\033[2;37;0m{' ' * 10}\033[1;30;47m{item_colored_symbol(inventory[12])}\033[0;0m "
f"\033[1;30;47m{item_colored_symbol(inventory[7])}\033[2;37;0m{' ' * 7}|")
print(f"|\033[32mwood: {inventory[3]}\033[0;0m{' ' * (22 - len(str(inventory[3])))}|")
print(f"|\033[38msilver: {inventory[4]}\033[0;0m"
f"{' ' * (8 - len(str(inventory[4])))}\033[1;30;47m{item_colored_symbol(inventory[10])}\033[0;0m "
f"\033[1;30;47m{item_colored_symbol(inventory[8])}\033[0;0m \033[1;30;47m{item_colored_symbol(inventory[11])}\033[0;0m"
f"{' ' * 3}|")
print(f"|\033[34mdiamond: {inventory[5]}\033[0;0m{' ' * (19- len(str(inventory[5])))}|")
print(f"|\033[31mangelic dust: {inventory[6]}\033[0;0m{' ' * (5- len(str(inventory[6])))}\033[1;30;47m"
f"{item_colored_symbol(inventory[9])}\033[0;0m"
f"{' ' * 7}|")
print(f"|\033[33mgold: {inventory[2]}\033[0;0m{' ' * (22- len(str(inventory[2])))}|")
row_1 = ""
row_2 = ""
row_3 = ""
row_counter = 1
for item in inventory[13::]:
if row_counter <= 6:
row_1 += f"{item_colored_symbol(item)} "
elif row_counter <= 12:
row_2 += f"{item_colored_symbol(item)} "
elif row_counter <= 18:
row_3 += f"{item_colored_symbol(item)} "
row_counter += 1
if row_counter < 7:
row_1 += "\033[1;30;47m \033[0;0m " * (7 - row_counter)
row_2 += "\033[1;30;47m \033[0;0m " * 6
row_3 += "\033[1;30;47m \033[0;0m " * 6
elif row_counter < 13:
row_2 += "\033[1;30;47m \033[0;0m " * (13 - row_counter)
row_3 += "\033[1;30;47m \033[0;0m " * 6
elif row_counter < 19:
row_3 += "\033[1;30;47m \033[0;0m " * (19 - row_counter)
print(f"|{' ' * 28}|")
print(f"| {row_1} |")
print(f"|{' ' * 28}|")
print(f"| {row_2} |")
print(f"|{' ' * 28}|")
print(f"| {row_3} |")
print(f"|\033[4m{' ' * 28}\033[0;0m|")
def random_primary_stat(rarity,):
"""
:return:
"""
if rarity == "common":
return f"primary: {random.choice(range(2, 5))}"
elif rarity == "normal":
return f"primary: {random.choice(range(4, 12))}"
elif rarity == "rare":
return f"primary: {random.choice(range(10, 25))}"
elif rarity == "legendary":
return f"primary: {random.choice(range(50, 100))}"
elif rarity == "angelic":
return f"primary: 200"
def random_secondary_stat(rarity):
"""
:param rarity:
:return:
"""
stat_factor = {"armor": 1, "hp": 10, "crit": 0.2, "crit_dmg": 1, "speed": 1, "mr": 1,
"dodge": 0.4, "cc_immun": 0.2, "mana": 0.2}
stat_name = random.choice(list(stat_factor.keys()))
if rarity == "common":
return f"{stat_name}: {round(random.choice(range(2, 5)) * stat_factor[stat_name], 1)}"
elif rarity == "normal":
return f"{stat_name}: {round(random.choice(range(4, 12)) * stat_factor[stat_name], 1)}"
elif rarity == "rare":
return f"{stat_name}: {round(random.choice(range(10, 25)) * stat_factor[stat_name], 1)}"
elif rarity == "legendary":
return f"{stat_name}: {round(random.choice(range(50, 100)) * stat_factor[stat_name], 1)}"
elif rarity == "angelic":
return f"{stat_name}: {round(200 * stat_factor[stat_name], 1)}"
def create_item(item_name, difficulty):
"""
:param item_name:
:param difficulty:
:return:
"""
rarity = get_random_rarity(difficulty)
return f"{item_name} {rarity} {random_primary_stat(rarity)}" \
f" {random_secondary_stat(rarity)} {random_secondary_stat(rarity)} {random_secondary_stat(rarity)}"\
def print_item(item):
"""
:param item:
:return:
"""
item_l = item.split()
item_color, text_color = get_rarity_color(item_l[1])
print(f" {text_color}{'_' * 16}\033[0;0m ")
print(f"{text_color}|\033[0;0m{item_l[0]}{' ' * (15 - len(item_l[0]) - len(item_l[1]))}{text_color}{item_l[1]} |")
print(f"{text_color}|\033[0;0m {item_color}{get_item_symbol(item_l[0]) }\033[0;0m{' ' * 13}{text_color}|\033[0;0m")
print(f"{text_color}|{' ' * 16}|\033[0;0m")
print(f"{text_color}|\033[0;0m {item_l[2]} {item_l[3]}\033[0;0m{' ' * (14 - len(item_l[2]) - len(item_l[3]))}{text_color}|\033[0;0m")
print(f"{text_color}|\033[0;0m {item_l[4]} {item_l[5]}\033[0;0m{' ' * (14 - len(item_l[4]) - len(item_l[5]))}{text_color}|\033[0;0m")
print(f"{text_color}|\033[0;0m {item_l[6]} {item_l[7]}\033[0;0m{' ' * (14 - len(item_l[6]) - len(item_l[7]))}{text_color}|\033[0;0m")
print(f"{text_color}|\033[0;0m {item_l[8]} {item_l[9]}\033[0;0m{' ' * (14 - len(item_l[8]) - len(item_l[9]))}{text_color}|\033[0;0m")
print(f"{text_color}|{'_' * 16}|\033[0;0m")
def item_colored_symbol(item):
"""
:param item:
:return:
"""
if item == "empty":
return "\033[1;30;47m⬛\033[0;0m"
else:
item_l = item.split()
item_color, text_color = get_rarity_color(item_l[1])
return f"{item_color}{get_item_symbol(item_l[0])}\033[0;0m"
def navigate_inventory(file):
"""
:return:
"""
full_inventory = read_inventory(file)
inventory = full_inventory[7::]
print("inventory option:\n-see stats of a weapon(s)\n-equip a weapon(e)\ngo back(b)")
option = input("choose one option by writing its corresponding value:")
while True:
if option == "s":
print("Choose item:\nhead(0)\nchest(1)\nfeet(2)\nmain hand(3)\n2nd hand(4)\n"
"jewel(5)\ninventory(coresponding number)")
option_s = get_correct_int_input("choose one option by writing its corresponding value:")
if option_s <= 21:
if inventory[option_s] == "empty":
print("You don't have an item in that slot")
else:
print_item(inventory[option_s])
else:
print("Wrong value entered")
elif option == "b":
break
else:
print("wrong value entered")
print("inventory option:\n-see stats of a weapon(s)\n-equip a weapon(e)\ngo back(b)")
option = input("choose one option by writing its corresponding value:")
def hero_movement(file, column: int, row: int, max_column: int, max_row: int, room):
"""
:param column:
:param row:
:param max_column:
:param max_row:
:return:
"""
print("You can move by entering a sequence of movements:\n-go up(w)\n-go left(a)\n-go down(s)\n-go right(d)\n\n"
"Other options:\nsee inventory(i)\nexit(e)")
movement = str.lower(input("enter a sequence of movements:"))
correct_movement = 0
while True:
if movement == "i":
print_inventory(file)
navigate_inventory(file)
return column, row
elif movement == "e":
print("Are you sure you want to exit?")
if get_answer():
return 0, 0
else:
row_func = row
column_func = column
while True:
for ch in movement:
if ch not in "wasd":
print("incorrect sequence")
break
else:
for ch in movement:
if ch == "w":
row_func -= 1
if ch == "s":
row_func += 1
if ch == "a":
column_func -= 1
if ch == "d":
column_func += 1
if 0 < column_func <= max_column and 0 < row_func <= max_row:
print(f"Your room: {room}")
print(f"column:{column_func} row:{row_func}")
return column_func, row_func
else:
print("incorrect movement")
break
movement = str.lower(input("enter a sequence of movements:"))
def village():
"""
:return:
"""
return ["", f"{' ' * 2}⚒{' ' * 3}‼", f"{' ' * 7}", f"{' ' * 3}⛲{' ' * 3}", f"{' ' * 7}", f"💍{' ' * 2}💰{' ' * 2}📕"]
def print_village(village):
margin = f"\033[48;5;0m{' ' * 2}\033[0;0m"
print(f"\033[48;5;0m{' ' * 22}\033[0;0m")
print(f"\033[48;5;0m{' ' * 2}\033[48;5;28m🏘🏠⛪\033[48;5;0m{' ' * 2}\033[48;5;28m🏚🏤\033[48;5;0m \033[0;0m")
print(f"{margin}{margin}\033[48;5;28m{village[1]}⛪{margin}")
print(f"{margin}{margin}\033[48;5;28m{village[2]}{margin}{margin}")
print(f"{margin}{margin}\033[48;5;28m{village[3]}{margin}{margin}")
print(f"{margin}\033[48;5;28m🏰{village[4]}{margin}{margin}")
print(f"{margin}\033[48;5;28m💒{village[5]}🏚{margin}")
print(f"\033[48;5;0m{' ' * 1}\033[48;5;28m🏘🏠\033[48;5;0m{' ' * 2}\033[48;5;28m🏚🏤\033[48;5;0m {' ' * 3}\033[0;0m")
print(f"\033[48;5;0m{' ' * 22}\033[0;0m")
def village_interface(name):
column_v, row_v = 1, 3
print(f"Move to the book(📕) if you want to read the Tutorial, and the rules")
village_with_hero = add_object_on_map(village(), get_hero_symbol(name), column_v, row_v)
print_village(village_with_hero)
while True:
column_v, row_v = hero_movement("ramulica.txt", column_v, row_v, 7, 5, "Village")
if column_v == 0 and row_v == 0:
return False
else:
village_with_hero = add_object_on_map(village(), get_hero_symbol(name), column_v, row_v)
print_village(village_with_hero)
if column_v == 7 and row_v == 1:
print("Do you want to go in a mission?")
if get_answer():
return True
elif column_v == 1 and row_v == 5:
print("Do you want to buy something from jeweler?")
if get_answer():
print("make jeweler interface")
elif column_v == 4 and row_v == 5:
print("Do you want to buy something from shop?")
if get_answer():
print("make shop interface")
elif column_v == 7 and row_v == 5:
print("Do you want to read the tutorial?")
if get_answer():
print("Tutorial")
elif column_v == 3 and row_v == 1:
print("Do you want to craft something at the blacksmith?")
if get_answer():
print("make blacksmith interface")
def margin_random_generator(column, row):
cl_rw = ["x", "y"]
random.shuffle(cl_rw)
if cl_rw[0] == "x":
return [random.choice([column, 1]), random.choice(range(1, row))]
else:
return [random.choice(range(1, column)), random.choice([row, 1])]
def door_data(entrance, biome, column, row, next_room):
if entrance == "up":
return random.choice(range(2, column - 1)), 1, next_room, choose_door(biome)
elif entrance == "down":
return random.choice(range(2, column - 1)), row, next_room, choose_door(biome)
elif entrance == "right":
return column, random.choice(range(2, row - 1)), next_room, choose_door(biome)
elif entrance == "left":
return 1, random.choice(range(2, row - 1)), next_room, choose_door(biome)
class Map:
def __init__(self, room, entrance, exit, length, height):
self.entrance = entrance
room_m = add_object_on_map(room, self.entrance[3], self.entrance[0], self.entrance[1])
self.exit = exit
room_m = add_object_on_map(room_m, self.exit[3], self.exit[0], self.exit[1])
list_of_enemies = []
self.enemy_1 = margin_random_generator(length, height), choose_enemy()
list_of_enemies.append(self.enemy_1)
self.enemy_2 = margin_random_generator(length, height), choose_enemy()
list_of_enemies.append(self.enemy_2)
self.enemy_3 = margin_random_generator(length, height), choose_enemy()
list_of_enemies.append(self.enemy_3)
if random.choice([True, False]):
self.enemy_4 = [random.choice(range(1, length)), random.choice(range(1, height))], choose_enemy()
list_of_enemies.append(self.enemy_4)
if random.choice([True, False]):
self.enemy_5 = [random.choice(range(1, length)), random.choice(range(1, height))], choose_enemy()
list_of_enemies.append(self.enemy_5)
if random.choice([True, False]):
self.enemy_6 = [random.choice(range(1, length)), random.choice(range(1, height))], choose_enemy()
list_of_enemies.append(self.enemy_6)
self.room = room_m
self.list_of_enemies = list_of_enemies
self.dead_enemies = []
def room_final(self):
room_e = self.room
for item in self.list_of_enemies:
if item[0] not in self.dead_enemies:
room_e = add_object_on_map(room_e, item[1], item[0][0], item[0][1])
return room_e
def add_random_doors():
door_wall = ["up", "down"]
for _ in range(5):
if door_wall[-1] == "down":
door_wall.extend(random.choice([['up', 'down'], ['right', 'left'], ['left', 'right']]))
elif door_wall[-1] == "up":
door_wall.extend(random.choice([['down', 'up'], ['right', 'left'], ['left', 'right']]))
elif door_wall[-1] == "left":
door_wall.extend(random.choice([['down', 'up'], ['right', 'left'], ['up', 'down']]))
elif door_wall[-1] == "right":
door_wall.extend(random.choice([['down', 'up'], ['up', 'down'], ['left', 'right']]))
return door_wall[1::]
def generate_arena(player, enemies):
arena = [f"{' '* 54}"] * 15
column_row_l = [[14, 12], [10, 11], [18, 11], [6, 10], [22, 10], [2, 9], [26, 9]]
effects_coordinates = [[0, 1], [-1, 1], [1, 1], [0, 2], [-1, 2], [1, 2]]
for i, item in enumerate(player):
for i_1, effect in enumerate(item[1::]):
arena = add_object_on_map(arena, effect, column_row_l[i][0] + effects_coordinates[i_1][0],
column_row_l[i][1] - effects_coordinates[i_1][1])
arena = add_object_on_map(arena, player[i][0], column_row_l[i][0], column_row_l[i][1])
for i, item in enumerate(enemies):
for i_1, effect in enumerate(item[1::]):
arena = add_object_on_map(arena, effect, column_row_l[i][0] + effects_coordinates[i_1][0],
column_row_l[i][1] - effects_coordinates[i_1][1] - 6)
arena = add_object_on_map(arena, enemies[i][0], column_row_l[i][0], column_row_l[i][1] - 6)
return arena
"""
player: [[][]]
"""
def ordonate_hp(entities):
coordinates = [7, 5, 9, 3, 11, 1, 13]
life_row = [" ", " "] * 7
life_str = ""
for i in range(len(entities)):
life_row[coordinates[i]] = str(int(entities[i].life[0])).zfill(6)
for item in life_row:
life_str += item
return life_str
def printe_arena(arena, ally_l, enemy_l):
margin = f"\033[48;5;16m \033[0;0m"
print(f"\033[38;5;34m{ordonate_hp(enemy_l)}\033[0;0m")
print(f"{margin}\033[48;5;16m{arena[0]}\033[0;0m{margin}")
for item in arena[1:-1]:
line = ""
for ch in item:
if ch != " ":
line += f"\033[48;5;246m{ch}\033[48;5;236m"
else:
line += ch
print(f"{margin}\033[48;5;236m{line}\033[0;0m{margin}")
print(f"{margin}\033[48;5;16m{arena[-1]}\033[0;0m{margin}")
print(f"\033[38;5;34m{ordonate_hp(ally_l)}\033[0;0m")
print("\nabilities")
print(ally_l[0].life, ally_l[0].mana)
def get_effect_symbol(effect):
effects = {"freeze": "❄", "burn": "🔥", "bleed": "🩸", "blind": "✨", "stun": "💫", "sleep": "💤", "taunt": "💢", "charm": "💞"}
for k, v in effects.items():
if effect == k:
return v
def stats_from_items(inventory):
added_stats = {"primary": 0, "armor": 0, "hp": 0, "crit": 0, "crit_dmg": 0, "speed": 0, "mr": 0,
"dodge": 0, "cc_immun": 0, "mana": 0}
for item in inventory[7:13]:
item_stats = item.split("-")
print(item_stats)
for stat in item_stats[2::]:
added_stats[stat[0:stat.find(":")]] += float(stat[stat.rfind(" ") + 1::])
return added_stats
class Combatant_izoteric:
def __init__(self, symbol, life, mana, data, good_bad_side, bonus = {"primary": 0, "armor": 0, "hp": 0, "crit": 0, "crit_dmg": 0, "speed": 0, "mr": 0,
"dodge": 0, "cc_immun": 0, "mana": 0}):
self.symbol = symbol
self.primary = data[0] + bonus["primary"]
self.life = [life + bonus["hp"]] + [data[1] + bonus["hp"]]
self.damage = data[2] + bonus["primary"]
self.speed = data[3] + bonus["speed"]
self.crit = data[4] + bonus["crit"]
self.crit_dmg = data[5] + bonus["crit_dmg"]
self.armor = data[6] + bonus["armor"]
self.mr = data[7] + bonus["mr"]
self.dodge = data[8] + bonus["dodge"]
self.cc_immun = data[9] + bonus["cc_immun"]
self.mana = [mana + bonus["mana"]] + [data[10] + bonus["mana"]]
self.side = good_bad_side
self.effects = {}
def deal_damage_heal(self, damage):
if self.life[0] + damage <= self.life[1]:
self.life = [self.life[0] + damage] + [self.life[1]]
else:
self.life = [self.life[1]] + [self.life[1]]
def get_lose_mana(self, mana_difference):
if self.mana[0] + mana_difference <= self.mana[1]:
self.mana = [self.mana[0] + mana_difference] + [self.mana[1]]
else:
self.mana = [self.mana[1]] + [self.mana[1]]
def round_end_effect(self):
output = {}
if 'burn' in self.effects:
Combatant_izoteric.deal_damage_heal(self, -self.damage)
if 'bleed' in self.effects:
Combatant_izoteric.deal_damage_heal(self, -(self.life[0] * 0.1))
for k, v in self.effects.items():
if v > 0:
output[k] = v - 1
self.effects = output
def printable_symbols(self):
return [self.symbol] + [get_effect_symbol(k) for k, v in self.effects.items()]
def crit_mechanic(self):
if self.crit >= 100:
return self.damage + (self.damage * (self.crit_dmg + self.crit - 100) / 100)
else:
x = random.choice([1] * int(self.crit) + [0] * (100 - int(self.crit)))
if x == 1:
return self.damage + (self.damage * self.crit_dmg / 100)
else:
return self.damage
def basic_attack_class(self, hero_class, enemies_l):
output = []
if hero_class == "wizard":
Combatant_izoteric.get_lose_mana(self, 15)
for _ in range(len(enemies_l)):
output.append([Combatant_izoteric.crit_mechanic(self)/len(enemies_l), []])
return output, "magical", "all"
def first_ability(self, hero_class, enemies_l):
output = []
if hero_class == "wizard":
if self.mana[0] >= 25:
Combatant_izoteric.get_lose_mana(self, -25)
for _ in range(len(enemies_l)):
output.append([Combatant_izoteric.crit_mechanic(self), [f'{random.choice(["freeze"] * 70 + ["none"] * 30)}2']])
else:
print("not enough mana")
return 'error'
return output, "magical", "damage"
def second_ability(self, hero_class, enemies_l):
output = []
if hero_class == "wizard":
if self.mana[0] >= 35:
Combatant_izoteric.get_lose_mana(self, -35)
                try:
                    attack_enemies = random.sample(range(len(enemies_l)), 2)
                except ValueError:
                    attack_enemies = [0]
                for i in range(len(enemies_l)):
                    if i in attack_enemies:
if "freeze" in [item[0:-1] for item in enemies_l[i]]:
output.append([Combatant_izoteric.crit_mechanic(self) * 4, ["burn3"]])
else:
output.append([Combatant_izoteric.crit_mechanic(self) * 2, ["burn3"]])
else:
output.append([0, []])
else:
print("not enough mana")
return 'error'
return output, "magical", "damage"
def third_ability(self, hero_class, enemies_effects_l):
output = []
if hero_class == "wizard":
if self.mana[0] == self.mana[1]:
Combatant_izoteric.get_lose_mana(self, self.mana[1])
for item in enemies_effects_l:
new_effects = []
for effect in item:
if effect[0:-1] == "freeze":
new_effects.append("freeze2")
elif effect[0:-1] == "burn":
new_effects.append("burn3")
else:
new_effects.append(effect)
output.append([0, new_effects])
return output, "magical", "round"
else:
print("Not enough mana")
return 'error'
def forth_ability(self, hero_class, enemies_l):
output = []
if hero_class == "wizard":
if self.mana[0] >= 50:
Combatant_izoteric.get_lose_mana(self, -50)
attack_enemy = random.choice(range(len(enemies_l)))
for i in range(len(enemies_l)):
if i == attack_enemy:
if "burn" in [item[0:-1] for item in enemies_l[i]]:
Combatant_izoteric.deal_damage_heal(self, int(self.damage * 15 * 0.1))
output.append([Combatant_izoteric.crit_mechanic(self) * 15, ["blind2"]])
else:
output.append([0, []])
else:
print("not enough mana")
return 'error'
return output, "pure", "damage"
def use_ability(self, hero_class, enemiy_l):
option = input("choose ability")
while True:
if option == "a": # and "blind" not in self.effects:
ability_0 = Combatant_izoteric.basic_attack_class(self, hero_class, enemiy_l)
if ability_0 != "error":
return ability_0
elif option == "q":
ability_1 = Combatant_izoteric.first_ability(self, hero_class, enemiy_l)
if ability_1 != "error":
return ability_1
else:
print('\033[38;5;160mYou do not have enough mana\033[0;0m')
option = input("choose ability")
elif option == "w":
ability_2 = Combatant_izoteric.second_ability(self, hero_class, enemiy_l)
if ability_2 != "error":
return ability_2
else:
print('\033[38;5;160mYou do not have enough mana\033[0;0m')
option = input("choose ability")
elif option == "e":
ability_3 = Combatant_izoteric.third_ability(self, hero_class, enemiy_l)
if ability_3 != "error":
return ability_3
else:
print('\033[38;5;160mYou do not have enough mana\033[0;0m')
option = input("choose ability")
elif option == "r":
ability_4 = Combatant_izoteric.forth_ability(self, hero_class, enemiy_l)
if ability_4 != "error":
return ability_4
else:
print('\033[38;5;160mYou do not have enough mana\033[0;0m')
option = input("choose ability")
else:
option = input("choose ability")
def enemy_attack(self, allies):
output = []
if self.mana[0] == self.mana[1]:
for i in range(len(allies)):
output.append([Combatant_izoteric.crit_mechanic(self) * 1.2,
[f"{random.choice(['stun'] * 5 + ['burn'] * 5 + ['blind'] * 5 + ['none'] * 85)}2"]])
else:
Combatant_izoteric.get_lose_mana(self, 15)
attack_ally = random.choice(range(len(allies)))
for i in range(len(allies)):
if i == attack_ally:
output.append([Combatant_izoteric.crit_mechanic(self) * 1.2,
[f"{random.choice(['stun'] * 5 + ['burn'] * 5 + ['blind'] * 5 + ['none'] * 85)}2"]])
else:
output.append([0, []])
return output, "pure", "damage"
def list_of_effects(self):
output = []
for k, v in self.effects.items():
output.append(f"{k}{v}")
return output
def ability_effect(enemy_list, damage_effects):
if damage_effects[1] == "magical":
for i, item in enumerate(enemy_list):
damage = damage_effects[0][i][0] - int(item.mr)
Combatant_izoteric.deal_damage_heal(item, -damage)
for effect in damage_effects[0][i][1]:
if effect[0:-1] != "none":
item.effects[effect[0:-1]] = int(effect[-1])
elif damage_effects[1] == "pure":
for i, item in enumerate(enemy_list):
damage = damage_effects[0][i][0]
Combatant_izoteric.deal_damage_heal(item, -damage)
for effect in damage_effects[0][i][1]:
if effect[0:-1] != "none":
item.effects[effect[0:-1]] = int(effect[-1])
elif damage_effects[1] == "error":
return "again"
def remove_dead_body(enemy_list):
for item in enemy_list:
if item.life[0] <= 0:
enemy_list.remove(item)
return enemy_list
def enter_a_turn(message):
    x = input(message)
    while True:
        if x == "c":
            break
        else:
            print("You entered a wrong answer")
            x = input(message)
return True
hero_file = decide_what_hero_to_play()
hero_name = hero_file[0:hero_file.rfind(".")]
hero_inventory = read_inventory(hero_file)
door_map_concatenation = add_random_doors()
biome = get_random_biome()
length_1, height_1 = generate_room_length_height()
length_2, height_2 = generate_room_length_height()
length_3, height_3 = generate_room_length_height()
length_4, height_4 = generate_room_length_height()
length_5, height_5 = generate_room_length_height()
length_6, height_6 = generate_room_length_height()
length_list = [length_1, length_2, length_3, length_4, length_5, length_6]
height_list = [height_1, height_2, height_3, height_4, height_5, height_6]
map_layout = [Map(generate_rooms(biome, length_1, height_1),
door_data(door_map_concatenation[0], biome, length_1, height_1, "Village"),
door_data(door_map_concatenation[1], biome, length_1, height_1, 1), length_1, height_1),
Map(generate_rooms(biome, length_2, height_2),
door_data(door_map_concatenation[2], biome, length_2, height_2, 0),
door_data(door_map_concatenation[3], biome, length_2, height_2, 2), length_2, height_2),
Map(generate_rooms(biome, length_3, height_3),
door_data(door_map_concatenation[4], biome, length_3, height_3, 1),
door_data(door_map_concatenation[5], biome, length_3, height_3, 3), length_3, height_3),
Map(generate_rooms(biome, length_4, height_4),
door_data(door_map_concatenation[6], biome, length_4, height_4, 2),
door_data(door_map_concatenation[7], biome, length_4, height_4, 4), length_4, height_4),
Map(generate_rooms(biome, length_5, height_5),
door_data(door_map_concatenation[6], biome, length_5, height_5, 3),
door_data(door_map_concatenation[7], biome, length_5, height_5, 5), length_5, height_5),
Map(generate_rooms(biome, length_6, height_6),
door_data(door_map_concatenation[6], biome, length_6, height_6, 4),
door_data(door_map_concatenation[7], biome, length_6, height_6, 6), length_6, height_6)
]
room_index = 0
while village_interface(hero_name):
print("Do you want to continue on the same map or not?")
if not get_answer():
door_map_concatenation = add_random_doors()
biome = get_random_biome()
length_1, height_1 = generate_room_length_height()
length_2, height_2 = generate_room_length_height()
length_3, height_3 = generate_room_length_height()
length_4, height_4 = generate_room_length_height()
length_5, height_5 = generate_room_length_height()
length_6, height_6 = generate_room_length_height()
length_list = [length_1, length_2, length_3, length_4, length_5, length_6]
height_list = [height_1, height_2, height_3, height_4, height_5, height_6]
map_layout = [Map(generate_rooms(biome, length_1, height_1),
door_data(door_map_concatenation[0], biome, length_1, height_1, "Village"),
door_data(door_map_concatenation[1], biome, length_1, height_1, 1), length_1, height_1),
Map(generate_rooms(biome, length_2, height_2),
door_data(door_map_concatenation[2], biome, length_2, height_2, 0),
door_data(door_map_concatenation[3], biome, length_2, height_2, 2), length_2, height_2),
Map(generate_rooms(biome, length_3, height_3),
door_data(door_map_concatenation[4], biome, length_3, height_3, 1),
door_data(door_map_concatenation[5], biome, length_3, height_3, 3), length_3, height_3),
Map(generate_rooms(biome, length_4, height_4),
door_data(door_map_concatenation[6], biome, length_4, height_4, 2),
door_data(door_map_concatenation[7], biome, length_4, height_4, 4), length_4, height_4),
Map(generate_rooms(biome, length_5, height_5),
door_data(door_map_concatenation[6], biome, length_5, height_5, 3),
door_data(door_map_concatenation[7], biome, length_5, height_5, 5), length_5, height_5),
Map(generate_rooms(biome, length_6, height_6),
door_data(door_map_concatenation[6], biome, length_6, height_6, 4),
door_data(door_map_concatenation[7], biome, length_6, height_6, 6), length_6, height_6)
]
column, row = map_layout[room_index].entrance[0], map_layout[room_index].entrance[1]
room_1_h = add_object_on_map(Map.room_final(map_layout[room_index]), get_hero_symbol(hero_name), column, row)
print_room(room_1_h, biome)
while True:
column, row = hero_movement(hero_file, column, row, length_list[room_index], height_list[room_index], room_index)
if column + row == 0:
break
for i, item in enumerate(map_layout[room_index].list_of_enemies):
if column == item[0][0] and row == item[0][1] and item[0] not in map_layout[room_index].dead_enemies:
room_1_h = add_object_on_map(Map.room_final(map_layout[room_index]), get_hero_symbol(hero_name), column, row)
print_room(room_1_h, biome)
print(f"You've encountered an enemy {item[1]}. Do you want to fight it?")
if get_answer():
difficulty = 1
enemies_len = random.choice(range(1, 7))
enemy_l = [Combatant_izoteric(item[1], 90000 / enemies_len, 0,
[40 / enemies_len, 90000 / enemies_len, 45 / enemies_len, 200, 0, 0,
3, 5, 0, 0, 45], "evil") for i in range(enemies_len)]
ally_l = []
enemy_2 = Combatant_izoteric("🧙", 850, 70, [40, 850, 45, 200, 0, 0, 3, 5, 0, 0, 70], "good",
stats_from_items(read_inventory("ramulica.txt")))
print(enemy_2.primary)
ally_l.append(enemy_2)
print(enemy_2.mana)
list_of_e = [Combatant_izoteric.list_of_effects(item_f) for item_f in enemy_l]
printe_arena(generate_arena([Combatant_izoteric.printable_symbols(enemy_2)],
[Combatant_izoteric.printable_symbols(item_f) for item_f in enemy_l]),
ally_l, enemy_l)
outcome = "Lose"
turn = 0
while True:
turn += 1
print(f"\n\033[38;5;220mIt's turn {turn}\033[0;0m\n")
if "freeze" in enemy_2.effects or "stun" in enemy_2.effects:
print(f"you are unable to attack because of your condition{enemy_2.effects}")
else:
if enter_a_turn("It's your turn to attack, enter c to continue"):
damage_u_deal = Combatant_izoteric.use_ability(enemy_2, 'wizard', list_of_e)
if damage_u_deal != "again":
ability_effect(enemy_l, damage_u_deal)
print(
f"\n\033[38;5;27mYour mana pool is {enemy_2.mana[0]}/{enemy_2.mana[1]}\033[0;0m")
print(f"You did \033[38;5;196m{damage_u_deal} damage\n\033[0;0m")
enemy_l = remove_dead_body(enemy_l)
list_of_e = [Combatant_izoteric.list_of_effects(item_f) for item_f in enemy_l]
printe_arena(generate_arena([Combatant_izoteric.printable_symbols(enemy_2)],
[Combatant_izoteric.printable_symbols(item_f) for item_f in
enemy_l]), ally_l, enemy_l)
if len(enemy_l) == 0:
outcome = "Victory"
break
if enter_a_turn("It's enemy's turn to attack, enter c to continue"):
for i, item_f in enumerate(enemy_l):
if "freeze" in item_f.effects or "stun" in item_f.effects:
print(f"enemy {i + 1} is unable to attack because of his condition{item_f.effects}")
else:
damage_dealt_by_enemy = Combatant_izoteric.enemy_attack(item_f, [enemy_2])
ability_effect([enemy_2], damage_dealt_by_enemy)
print(
f"Enemy {i + 1} did \033[38;5;196m{damage_dealt_by_enemy} damage\033[0;0m to you")
printe_arena(generate_arena([Combatant_izoteric.printable_symbols(enemy_2)],
[Combatant_izoteric.printable_symbols(item_f) for item_f in
enemy_l]), ally_l, enemy_l)
for item_f in enemy_l:
Combatant_izoteric.round_end_effect(item_f)
for item_f in ally_l:
Combatant_izoteric.round_end_effect(item_f)
if outcome == "Victory":
print("victory")
map_layout[room_index].dead_enemies.append(item[0])
break
if column == map_layout[room_index].exit[0] and row == map_layout[room_index].exit[1]:
room_1_h = add_object_on_map(Map.room_final(map_layout[room_index]), get_hero_symbol(hero_name), column, row)
print_room(room_1_h, biome)
print(room_index)
print(f"Do you want to go to room {map_layout[room_index].exit[2]}")
if get_answer():
room_index = map_layout[room_index].exit[2]
column, row = map_layout[room_index].entrance[0], map_layout[room_index].entrance[1]
elif column == map_layout[room_index].entrance[0] and row == map_layout[room_index].entrance[1]:
room_1_h = add_object_on_map(Map.room_final(map_layout[room_index]), get_hero_symbol(hero_name), column, row)
print_room(room_1_h, biome)
print(f"Do you want to go to room {map_layout[room_index].entrance[2]}")
if get_answer():
if map_layout[room_index].entrance[2] == "Village":
break
else:
room_index = map_layout[room_index].entrance[2]
column, row = map_layout[room_index].exit[0], map_layout[room_index].exit[1]
room_1_h = add_object_on_map(Map.room_final(map_layout[room_index]), get_hero_symbol(hero_name), column, row)
print_room(room_1_h, biome)
|
Ramulica/The-Big-Book-of-Small-Python-Projects-solved-by-ramuica-
|
Angeilic Powers/main.py
|
main.py
|
py
| 50,582 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29457712632
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''translate.translate: provides main() entry point.'''
__version__ = '0.1.3'
import logging
import argparse
import requests
from bs4 import BeautifulSoup
from terminaltables import AsciiTable
logging.basicConfig(
filename = '.log',
filemode = 'a+',
level = logging.INFO,
format = '%(asctime)s | %(levelname)s | %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S'
)
def main():
''' Parse the arguments and start running what needs to be running '''
parser = argparse.ArgumentParser()
parser.add_argument(
'dictionary', nargs='?', type=str, default='',
help='Dictionary to use for translation. To translate from english to french, it should take the value enfr, for english to italian, enit, etc.'
)
parser.add_argument(
'word', nargs='?', type=str, default='',
help='Word to be translated'
)
parser.add_argument(
'-l', '--list', action='store_true',
help='Returns the list of available dictionaries.'
)
args = parser.parse_args()
logging.info('Arguments parsed')
dictionaries = get_dictionaries()
if args.list:
logging.info('Attempting to print the list of available dictionaries')
print('')
print('**** Available dictionaries:')
print(dictionaries.table)
logging.info('Printed the list of available dictionaries')
if args.word and args.dictionary:
translate_word(args.dictionary, args.word)
else:
if not args.list:
logging.info('User didn\'t pass the correct arguments. Displaying the help message and shutting down')
print('Please enter a dictionary and a word.')
print('\tEnter -l or --list to get a list of all available dictionaries.')
print('Enter -h or --help for help.')
def get_dictionaries():
'''
Requests wordreference.com homepage and parse the list of availables
dictionaries
'''
url = 'http://www.wordreference.com'
logging.info('Requesting {} for parsing'.format(url))
r = requests.get(url)
if r.status_code != 200:
logging.info('Request failed with status {}'.format(r.status_code))
return -1
logging.info('Request for {} successful'.format(url))
logging.info('Attempting to parse the html and extract the list of dictionaries')
soup = BeautifulSoup(r.content, 'html.parser')
options = soup.find_all('option')
dictionaries = [ ['Key', 'Dictionary'] ]
dictionaries += [ [option['id'], option.get_text()] for option in options
if option['id'][:2] != option['id'][2:4] # No definition option
and len(option['id']) == 4 # No synonyms or conjugation option
]
logging.info('List of dictionaries extracted')
table = AsciiTable(dictionaries)
return table
def translate_word(dictionary, word):
'''
Requests the page for the translation of "word" using the dictionary
"dictionary".
Print a formatted version of the response
'''
    # Initial checks
if not isinstance(dictionary, str) or len(dictionary) != 4:
raise TypeError('''The "dictionary" argument must be a string of length 4,
with the first two letters being the acronym of the original
language, and the last two letters, the acronym of the language
you would like to translate to.''')
if not isinstance(word, str):
raise TypeError('The "word" argument must be a string (type {} passed)'.format(type(word)))
# Building the url (and formatting it) and get the html from GET
base_url = 'http://www.wordreference.com/'
url = base_url + dictionary + '/' + word.replace(' ', '%20')
logging.info('Requesting {} for parsing'.format(url))
r = requests.get(url)
if r.status_code != 200:
logging.info('Request failed with status {}'.format(r.status_code))
return -1
logging.info('Request for {} successful'.format(url))
# Parsing the html to extract the data
# I kept it to what matters:
# * Original word/expression
# * Translation
# Because who really cares if it is an intransitive verb or a noun?
logging.info('Attempting to parse the html and extract the translations')
soup = BeautifulSoup(r.content, 'html.parser')
    try:
        table_single_form = soup.find_all('table', {'class': 'WRD'})[0]
        data_single_form = parse_translation_table(table_single_form)
    except IndexError:
        logging.warning('The word passed doesn\'t have any translation')
        return -1
logging.info('Translations extracted')
# print the results in a pretty way
print_results(word, data_single_form)
def parse_translation_table(table):
'''
Given the table of translations extracted with BeautifulSoup, returns
a list of lists containing the various translations.
'''
data = [ ['Original Language', 'Translation'] ]
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) == 3:
if cells[2].em is None:
continue
cells[2].em.decompose()
if cells[0].get_text(strip=True) == '':
data[-1][1] += u'\n{}'.format(cells[2].get_text())
else:
data += [[
cells[0].find('strong').get_text(),
cells[2].get_text()
]]
return data
def print_results(word, data_single_form):
''' Pretty print of the translation results '''
print('')
print('**** Translations for {}:'.format(word))
print(AsciiTable(data_single_form).table)
print('')
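# Example invocations (a sketch; assumes the module is run directly -- the installed
# console-script name may differ):
#   python -m translate.translate --list         # print the available dictionaries
#   python -m translate.translate enfr house     # translate "house" from English to French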
|
alvarolopez/translate-term
|
translate/translate.py
|
translate.py
|
py
| 5,821 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26305567938
|
#!/usr/bin/python3
from main import Session, engine, User
local_session = Session(bind = engine)
#ascending
users = local_session.query(User).order_by(User.username).all()
for user in users:
print(f"{user.username}")
|
AndyMSP/holbertonschool-higher_level_programming
|
0x0F-python-object_relational_mapping/Practice_Video/ordering.py
|
ordering.py
|
py
| 225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2810887356
|
from odoo import api, fields, models
class Note(models.Model):
_inherit = 'note.note'
@api.multi
def act_back(self):
if self._context.get('save_close'):
return {'type': 'ir.actions.act_window_close'}
return {'name': 'Notes',
'type': 'ir.actions.act_window',
'view_id': self.env.ref('note_quick.view_quick_note_kanban').id,
'view_mode': 'kanban',
'res_model': 'note.note',
'target': 'new'}
@api.model
def act_create_one(self):
return {'name': 'Create a new note',
'type': 'ir.actions.act_window',
'view_id': self.env.ref('note_quick.view_quick_note_form').id,
'view_mode': 'form',
'res_model': 'note.note',
'target': 'new'}
@api.multi
def act_edit(self):
return {'name': 'Edit Note',
'type': 'ir.actions.act_window',
'view_id': self.env.ref('note_quick.view_quick_note_form').id,
'view_mode': 'form',
'res_model': 'note.note',
'res_id': self.id,
'target': 'new'}
|
erlaangga/note_quick
|
note_quick/models/note.py
|
note.py
|
py
| 984 |
python
|
hi
|
code
| 0 |
github-code
|
6
|
38907343805
|
from sqlalchemy.exc import IntegrityError
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from app.core.repo.base import BaseSqlalchemyRepo
from app.core.exceptions.repo import RepoException
from .models import Review
class ReviewRepo(BaseSqlalchemyRepo):
model = Review
async def create(self, db_session, obj_in):
"""
Create review
"""
try:
return await super().create(db_session, obj_in)
except IntegrityError as e:
raise RepoException("Review name must be unique", e)
async def get_review_by_room_id(self, db_session, room_id):
stmt = select(self.model).options(selectinload(self.model.user)).where(self.model.room_id == room_id)
result = await db_session.execute(stmt)
return result.scalars().all()
async def get(self, db_session, id):
"""
Get review by id
"""
obj = await super().get(db_session, id)
if not obj:
raise RepoException("Review not found", None, status=404)
return obj
async def get_all(self, db_session):
"""
Get all reviews
"""
return await super().list(db_session)
async def update(self, db_session, id, obj_in):
"""
Update review by id
"""
db_obj = await self.get(db_session, id)
if not db_obj:
raise RepoException("Review not found", None, status=404)
try:
return await super().update(db_session, db_obj, obj_in)
except IntegrityError as e:
raise RepoException("Review title must be unique", e)
async def delete(self, db_session, id, user_id):
"""
Delete review by id
"""
await self.get(db_session, id=id)
return await super().delete(db_session, id=id)
|
rasimatics/excursio-backend
|
app/apps/review/repo.py
|
repo.py
|
py
| 1,901 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28427138730
|
from smtplib import SMTP
from email.header import Header
from email.mime.text import MIMEText
def main():
    # Please change the email sender and receivers below to your own addresses
sender = '[email protected]'
receivers = ['[email protected]', '[email protected]']
message = MIMEText('用Python发送邮件的示例代码.', 'plain', 'utf-8')
message['From'] = Header('大锤', 'utf-8')
message['To'] = Header('发放', 'utf-8')
message['Subject'] = Header('示例代码实验邮件', 'utf-8')
smtper = SMTP('smtp.zhongfu.net')
print("qqq")
    # Please change the login password below to your own
smtper.login(sender, '密码')
smtper.sendmail(sender, receivers, message.as_string())
    print('Email sending finished!')
if __name__ == '__main__':
main()
|
sunhuimoon/Python100Days
|
day14/day1403.py
|
day1403.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39348226616
|
import os
from rebasehelper.types import Options
from rebasehelper.constants import CONFIG_PATH, CONFIG_FILENAME, CHANGES_PATCH
from rebasehelper.plugins.plugin_manager import plugin_manager
OPTIONS: Options = [
# basic
{
"name": ["--version"],
"default": False,
"switch": True,
"help": "show rebase-helper version and exit",
},
# output control
{
"name": ["-v", "--verbose"],
"default": 0,
"counter": True,
"help": "be more verbose",
},
{
"name": ["--color"],
"choices": ["always", "never", "auto"],
"default": "auto",
"help": "colorize the output, defaults to %(default)s",
},
{
"name": ["--background"],
"choices": ["dark", "light", "auto"],
"default": "auto",
"help": "use color scheme for the given background, defaults to %(default)s",
},
{
"name": ["--results-dir"],
"help": "location where the rebase-helper-results directory will be created",
},
{
"name": ["--workspace-dir"],
"help": "location where the rebase-helper-workspace directory will be created",
},
# tool selection
{
"name": ["--buildtool"],
"choices": plugin_manager.build_tools.get_all_plugins(),
"available_choices": plugin_manager.build_tools.get_supported_plugins(),
"default": plugin_manager.build_tools.get_default_plugins(True),
"help": "build tool to use, defaults to %(default)s",
},
{
"name": ["--srpm-buildtool"],
"choices": plugin_manager.srpm_build_tools.get_all_plugins(),
"available_choices": plugin_manager.srpm_build_tools.get_supported_plugins(),
"default": plugin_manager.srpm_build_tools.get_default_plugins(True),
"help": "SRPM build tool to use, defaults to %(default)s",
},
{
"name": ["--pkgcomparetool"],
"choices": plugin_manager.checkers.get_all_plugins(),
"available_choices": plugin_manager.checkers.get_supported_plugins(),
"nargs": "?",
"const": [""],
"default": plugin_manager.checkers.get_default_plugins(),
"type": lambda s: s.split(','),
"help": "set of tools to use for package comparison, defaults to "
"%(default)s if available",
},
{
"name": ["--outputtool"],
"choices": plugin_manager.output_tools.get_all_plugins(),
"available_choices": plugin_manager.output_tools.get_supported_plugins(),
"default": plugin_manager.output_tools.get_default_plugins(True),
"help": "tool to use for formatting rebase output, defaults to %(default)s",
},
{
"name": ["--versioneer"],
"choices": plugin_manager.versioneers.get_all_plugins(),
"available_choices": plugin_manager.versioneers.get_supported_plugins(),
"default": None,
"help": "tool to use for determining latest upstream version",
},
# blacklists
{
"name": ["--versioneer-blacklist"],
"choices": plugin_manager.versioneers.get_all_plugins(),
"available_choices": plugin_manager.versioneers.get_supported_plugins(),
"nargs": "?",
"const": [""],
"default": [],
"type": lambda s: s.split(","),
"help": "prevent specified versioneers from being run",
},
{
"name": ["--spec-hook-blacklist"],
"choices": plugin_manager.spec_hooks.get_all_plugins(),
"available_choices": plugin_manager.spec_hooks.get_supported_plugins(),
"nargs": "?",
"const": [""],
"default": [],
"type": lambda s: s.split(","),
"help": "prevent specified spec hooks from being run",
},
{
"name": ["--build-log-hook-blacklist"],
"choices": plugin_manager.build_log_hooks.get_all_plugins(),
"available_choices": plugin_manager.build_log_hooks.get_supported_plugins(),
"nargs": "?",
"const": [""],
"default": [],
"type": lambda s: s.split(","),
"help": "prevent specified build log hooks from being run"
},
# behavior control
{
"name": ["--bugzilla-id"],
"metavar": "BUG_ID",
"default": None,
"help": "do a rebase based on Upstream Release Monitoring bugzilla"
},
{
"name": ["--non-interactive"],
"default": False,
"switch": True,
"dest": "non_interactive",
"help": "do not interact with user",
},
{
"name": ["--favor-on-conflict"],
"choices": ["downstream", "upstream", "off"],
"default": "off",
"dest": "favor_on_conflict",
"help": "favor downstream or upstream changes when conflicts appear",
},
{
"name": ["--not-download-sources"],
"default": False,
"switch": True,
"help": "do not download sources",
},
{
"name": ["-w", "--keep-workspace"],
"default": False,
"switch": True,
"help": "do not remove workspace directory after finishing",
},
{
"name": ["--apply-changes"],
"default": False,
"switch": True,
"help": "apply {} after a successful rebase".format(CHANGES_PATCH),
},
{
"name": ["--disable-inapplicable-patches"],
"default": False,
"switch": True,
"dest": "disable_inapplicable_patches",
"help": "disable inapplicable patches in rebased SPEC file",
},
{
"name": ["--skip-version-check"],
"default": False,
"switch": True,
"help": "force rebase even if current version is newer than requested version",
},
{
"name": ["--update-sources"],
"default": False,
"switch": True,
"help": "update \"sources\" file and upload new sources to lookaside cache",
},
{
"name": ["--skip-upload"],
"default": False,
"switch": True,
"help": "skip uploading new sources to lookaside cache",
},
{
"name": ["--force-build-log-hooks"],
"default": False,
"switch": True,
"help": "enforce running of build log hooks (even in non-interactive mode)",
},
# remote builder options
{
"name": ["--builds-nowait"],
"default": False,
"switch": True,
"help": "do not wait for remote builds to finish",
},
{
"name": ["--build-tasks"],
"dest": "build_tasks",
"metavar": "OLD_TASK,NEW_TASK",
"type": lambda s: s.split(','),
"help": "comma-separated remote build task ids",
},
# additional local builder options
{
"name": ["--builder-options"],
"default": None,
"metavar": "BUILDER_OPTIONS",
"help": "enable arbitrary local builder option(s), enclose %(metavar)s in quotes "
"to pass more than one",
},
{
"name": ["--srpm-builder-options"],
"default": None,
"metavar": "SRPM_BUILDER_OPTIONS",
"help": "enable arbitrary local srpm builder option(s), enclose %(metavar)s in quotes "
"to pass more than one",
},
# misc
{
"name": ["--lookaside-cache-preset"],
"choices": ["fedpkg", "centpkg", "rhpkg", "rhpkg-sha512"],
"default": "fedpkg",
"help": "use specified lookaside cache configuration preset, defaults to %(default)s",
},
{
"name": ["--changelog-entry"],
"default": "- New upstream release %{version}",
"help": "text to use as changelog entry, can contain RPM macros, which will be expanded",
},
{
"name": ["--no-changelog-entry"],
"default": False,
"switch": True,
"help": "do not add a changelog entry at all",
},
{
"name": ["--config-file"],
"default": os.path.join(CONFIG_PATH, CONFIG_FILENAME),
"help": "path to a configuration file, defaults to %(default)s",
},
{
"name": ["-D", "--define"],
"default": [],
"append": True,
"dest": "rpmmacros",
"metavar": "'MACRO EXPR'",
"help": "define an rpm macro, can be used multiple times",
},
# sources
{
"name": ["sources"],
"metavar": "SOURCES",
"nargs": "?",
"default": None,
"help": "version number or filename of the new source archive",
},
]
def traverse_options(options):
group_index = 0
for opt in options:
if isinstance(opt, list):
for inner_opt in opt:
yield dict(group=group_index, **inner_opt)
group_index += 1
else:
yield opt
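# A small illustration of traverse_options with hypothetical data: plain options are
# yielded unchanged, while options nested in a list are tagged with a shared "group" index.
#   opts = [{"name": ["--a"]}, [{"name": ["--b"]}, {"name": ["--c"]}]]
#   list(traverse_options(opts))
#   # -> [{'name': ['--a']}, {'group': 0, 'name': ['--b']}, {'group': 0, 'name': ['--c']}]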
|
rebase-helper/rebase-helper
|
rebasehelper/options.py
|
options.py
|
py
| 8,712 |
python
|
en
|
code
| 42 |
github-code
|
6
|
24769179889
|
squaredWeight = None
def performCollection(cityLevel, filename):
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
import time
from pytrends.request import TrendReq
pytrends = TrendReq()
count = 0
for keyword in kw_list:
count += 1
if not '/' in keyword:
filename = outputDir+ keyword + '.pickle'
from os import path
if not path.exists(filename):
pytrends.build_payload([keyword])
if cityLevel:
df = pytrends.interest_by_region(resolution='CITY', inc_low_vol=True, inc_geo_code=False)
else:
df = pytrends.interest_by_region(resolution='COUNTRY', inc_low_vol=True, inc_geo_code=False)
import pickle
outfile = open(filename,'wb')
pickle.dump(df,outfile)
outfile.close()
#time.sleep(3)
print(count)
def formCityList(filename):
filenameToWriteTo = "allCities.pickle"
from os import path
if not path.exists(filenameToWriteTo):
outputDir = 'GoogleTrendsCity/'
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
count = 0
allCities = {}
for keyword in kw_list:
print(count)
if count != 897:
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
if len(df) != 0:
cities = list(df['geoName'])
latLong = list(df['coordinates'])
for i in range(0, len(cities), 1):
cityName = cities[i]
if not cityName.lower() in allCities:
allCities[cityName.lower()] = latLong[i]
count += 1
import pickle
outfile = open(filenameToWriteTo,'wb')
pickle.dump(allCities, outfile)
outfile.close()
def averageAndStdDevAcrossAssociationsMadeByGoogle(cityLevel, filename):
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import os
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
print(len(kw_list))
count = 0
valuesReturned = []
zeroValueCount = 0
for keyword in kw_list:
if keyword != 'con':
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
try:
valuesReturned.append(len(df))
except:
zeroValueCount += 1
count += 1
import numpy as np
print(np.average(valuesReturned))
print(np.std(valuesReturned))
print(zeroValueCount)
print(count)
def assignRegion(cityLevel, filename, outputFile):
import os
outputDirAssignRegion = 'AssignRegion/'
if not os.path.exists(outputDirAssignRegion):
os.mkdir(outputDirAssignRegion)
outputDirAssignRegion = 'AssignRegionWeightSquared/'
if not os.path.exists(outputDirAssignRegion):
os.mkdir(outputDirAssignRegion)
outputDirAssignRegion = 'AssignRegion/'
if squaredWeight:
outputDirAssignRegion = 'AssignRegionWeightSquared/'
isoToLat, isoToLong = getCountryInfo()
print(isoToLat)
print(isoToLong)
import os
if cityLevel:
outputDir = 'GoogleTrendsCity/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
else:
outputDir = 'GoogleTrendsCountry/'
if not os.path.exists(outputDir):
os.mkdir(outputDir)
import pickle
infile = open(filename,'rb')
kw_list = pickle.load(infile)
infile.close()
noData = 0
noWeightsOver0 = 0
rows = [['keyword', 'using top 1', 'using top 3', 'using weight > 50', 'all']]
keywordToRegion1 = {}
keywordToRegion2 = {}
keywordToRegion3 = {}
keywordToRegion4 = {}
for keyword in kw_list:
if keyword != 'con':
filename = outputDir+ keyword + '.pickle'
from os import path
if path.exists(filename):
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
dataReturnedByTrends = False
try:
weights = list(df['value'])
weightsValues = []
for value in weights:
weightsValues.append(value[0])
df['weights'] = weightsValues
df = df.loc[df['weights'] > 0]
dataReturnedByTrends = True
except:
noData += 1
if dataReturnedByTrends:
if len(df) > 0:
df1 = df.nlargest(1, 'weights')
df2 = df.nlargest(3, 'weights')
df3 = df.loc[df['weights'] > 50]
df4 = df
label1 = predictRegion(cityLevel, df1, isoToLong)
if label1 != None:
keywordToRegion1[keyword] = label1
label2 = predictRegion(cityLevel, df2, isoToLong)
if label2 != None:
keywordToRegion2[keyword] = label2
label3 = predictRegion(cityLevel, df3, isoToLong)
if label3 != None:
keywordToRegion3[keyword] = label3
label4 = predictRegion(cityLevel, df4, isoToLong)
if label4 != None:
keywordToRegion4[keyword] = label4
if label1 != None or label2 != None or label3 != None or label4 != None:
rows.append([keyword, label1, label2, label3, label4])
else:
noWeightsOver0 += 1
print(str(noData) + " out of " + str(len(kw_list)) + " tokens had no data.")
print(str(noWeightsOver0) + " out of " + str(len(kw_list)) + " tokens had no weights.")
writeRowsToCSV(rows, outputDirAssignRegion+outputFile)
    rows = [['Restriction', 'Predictions', 'NA_SA', 'AF_EUR', 'AS_OC', 'Total Accuracy', 'Total Predictions']]
rows.append(['using top 1']+evaluatePredictions(keywordToRegion1))
rows.append(['using top 3']+evaluatePredictions(keywordToRegion2))
rows.append(['using weight > 50']+evaluatePredictions(keywordToRegion3))
rows.append(['all']+evaluatePredictions(keywordToRegion4))
writeRowsToCSV(rows, outputDirAssignRegion+"Performance"+outputFile)
def predictRegion(cityLevel, df, isoToLong):
import numpy as np
if cityLevel:
geoNameToCoordinates = dict(zip(list(df["geoName"]), list(df['coordinates'])))
geoNameToWeight = dict(zip(list(df["geoName"]), list(df['weights'])))
label = None
l1 = 0
l2 = 0
l3 = 0
for geoName in geoNameToCoordinates:
coordinate = geoNameToCoordinates[geoName]
weight = geoNameToWeight[geoName]
if squaredWeight:
weight = weight*weight
long = coordinate['lng']
if long <= -25:
l1 += weight
elif long <= 65:
l2 += weight
else:
l3 += weight
Americas = l1
Africa_Europe = l2
Asia_Australia = l3
total = l1+l2+l3
if total > 0:
ratioAmericas = float(Americas)/float(total)
ratioAfrica_Europe = float(Africa_Europe)/float(total)
ratioAsia_Australia = float(Asia_Australia)/float(total)
ratioMax = np.max([ratioAmericas, ratioAfrica_Europe, ratioAsia_Australia])
label = None
if ratioAmericas == ratioMax:
label = "Americas"
elif ratioAfrica_Europe == ratioMax:
label = "Africa_Europe"
else:
label = "Asia_Australia"
else:
label = None
else:
countryISOCodeToWeight = dict(zip(list(df["geoCode"]), list(df['weights'])))
label = None
l1 = 0
l2 = 0
l3 = 0
for countryISOCode in countryISOCodeToWeight:
weight = countryISOCodeToWeight[countryISOCode]
long = isoToLong[countryISOCode]
if long <= -25:
l1 += weight
elif long <= 65:
l2 += weight
else:
l3 += weight
Americas = l1
Africa_Europe = l2
Asia_Australia = l3
total = l1+l2+l3
if total > 0:
ratioAmericas = float(Americas)/float(total)
ratioAfrica_Europe = float(Africa_Europe)/float(total)
ratioAsia_Australia = float(Asia_Australia)/float(total)
ratioMax = np.max([ratioAmericas, ratioAfrica_Europe, ratioAsia_Australia])
label = None
if ratioAmericas == ratioMax:
label = "Americas"
elif ratioAfrica_Europe == ratioMax:
label = "Africa_Europe"
else:
label = "Asia_Australia"
else:
label = None
return label
def getCountryInfo():
#file with average lat, long for each country
#country info from: https://gist.github.com/tadast/8827699#file-countries_codes_and_coordinates-csv
import pandas as pd
filePath = 'countries_codes_and_coordinates.csv'
df=pd.read_csv(filePath, encoding='utf-8')
print(df.columns)
temp = list(df["Alpha-2 code"])
countryList = []
for isoCode in temp:
countryList.append(str(isoCode).strip().replace('"', ''))
latitudeList = []
temp = list(df['Latitude (average)'])
for s in temp:
latitudeList.append(float(s.strip().replace('"', '')))
longitudeList = []
temp = list(df['Longitude (average)'])
for s in temp:
longitudeList.append(float(s.strip().replace('"', '')))
isoToLat = dict(zip(countryList, latitudeList))
isoToLong = dict(zip(countryList, longitudeList))
isoToLat['CW'] = 12.1696
isoToLong['CW'] = -68.9900
isoToLat['XK'] = 42.6026
isoToLong['XK'] = 20.9030
isoToLat['SX'] = 18.0425
isoToLong['SX'] = -63.0548
isoToLat['MF'] = 18.0826
isoToLong['MF'] = -63.0523
isoToLat['AX'] = 60.1785
isoToLong['AX'] = 19.9156
isoToLat['BL'] = 17.9000
isoToLong['BL'] = -62.8333
isoToLat['BQ'] = 12.1684
isoToLong['BQ'] = -68.3082
return isoToLat, isoToLong
def writeRowsToCSV(rows, fileToWriteToCSV):
import csv
if len(rows) > 0:
with open(fileToWriteToCSV, "w", encoding='utf-8') as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(rows)
fp.close()
print("Written " + str(len(rows)) + " rows to: " + fileToWriteToCSV)
def evaluatePredictions(tokenToPrediction):
import pandas as pd
filePath = "Input/combineDBsCoordinateGroundTruthDiv3.csv"
df=pd.read_csv(filePath, encoding='utf-8')
tokenToLabel = dict(zip(list(df["id"]), list(df['label'])))
l1 = 0
l2 = 0
l3 = 0
for token in tokenToPrediction:
prediction = tokenToPrediction[token]
if prediction == 'Americas':
l1 += 1
elif prediction == 'Africa_Europe':
l2 += 1
else:
l3 += 1
print(str(l1) + ", " + str(l2) + ", " + str(l3) + " Americas vs. Africa_Europe vs. Asia_Australia")
correct = {'Americas':0,'Africa_Europe':0,'Asia_Australia':0}
wrong = {'Americas':0,'Africa_Europe':0,'Asia_Australia':0}
for token in tokenToPrediction:
label = tokenToLabel[token]
prediction = tokenToPrediction[token]
if label == prediction:
if label == 'Americas':
correct['Americas'] += 1
elif label == 'Africa_Europe':
correct['Africa_Europe'] += 1
elif label == 'Asia_Australia':
correct['Asia_Australia'] += 1
else:
print("unknown label")
import sys
sys.exit()
else:
if label == 'Americas':
wrong['Americas'] += 1
elif label == 'Africa_Europe':
wrong['Africa_Europe'] += 1
elif label == 'Asia_Australia':
wrong['Asia_Australia'] += 1
else:
print("unknown label")
import sys
sys.exit()
import numpy as np
accuracy = float(np.sum(list(correct.values())))/float(np.sum(list(correct.values()))+np.sum(list(wrong.values())))
row = []
predictions = []
for key in ['Americas', 'Africa_Europe', 'Asia_Australia']:
predictions.append(float(correct[key]+wrong[key]))
precision = []
for key in ['Americas', 'Africa_Europe', 'Asia_Australia']:
precision.append(round(float(correct[key])/float(correct[key]+wrong[key])*100,2))
row = [str(predictions)]+precision
row += [round(accuracy*100, 2), float(np.sum(list(correct.values()))+np.sum(list(wrong.values())))]
return row
def compareQueryCityLocationVsTopTrendingCityLocation():
rows = [['query city', 'query city geo', 'top Google Trends city', 'top city geo', 'distance between two']]
distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends = []
noWeightsOver0 = 0
noData = 0
filename = "allCities.pickle"
import pickle
infile = open(filename,'rb')
cityToLatLong = pickle.load(infile)
infile.close()
count = 0
for cityName in cityToLatLong:
if not '/' in cityName:
queryCityCoordinates = (cityToLatLong[cityName]['lat'], cityToLatLong[cityName]['lng'])
queryCityName = cityName
outputDir = 'GoogleTrendsCity/'
filename = outputDir+ cityName + '.pickle'
from os import path
if path.exists(filename):
count += 1
import pickle
infile = open(filename,'rb')
df = pickle.load(infile)
infile.close()
try:
weights = list(df['value'])
weightsValues = []
for value in weights:
weightsValues.append(value[0])
df['weights'] = weightsValues
df = df.loc[df['weights'] > 0]
if len(df) > 0:
df1 = df.nlargest(1, 'weights')
topGoogleTrendCityCoordinates = list(df1['coordinates'])[0]
topGoogleTrendCityName = list(df1['geoName'])[0]
topGoogleTrendCityCoordinates = (topGoogleTrendCityCoordinates['lat'], topGoogleTrendCityCoordinates['lng'])
from geopy.distance import geodesic
from geopy.distance import great_circle
distanceBetweenTheTwo = geodesic(queryCityCoordinates, topGoogleTrendCityCoordinates).miles
distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends.append(distanceBetweenTheTwo)
rows.append([queryCityName, str(queryCityCoordinates), topGoogleTrendCityName, str(topGoogleTrendCityCoordinates), distanceBetweenTheTwo])
else:
noWeightsOver0 += 1
except:
noData += 1
print(str(noData) + " out of " + str(count) + " tokens had no data.")
print(str(noWeightsOver0) + " out of " + str(count) + " tokens had no weights.")
import numpy as np
print(np.average(distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends))
print(np.std(distanceBetweenGoogleQueryCityAndTopCityFromGoogleTrends))
writeRowsToCSV(rows, 'topCityAnalysis.csv')
if __name__ == '__main__':
pass
step1 = False
if step1:
performCollection(True, 'Input/459.pickle') #Google Trends at city level
performCollection(True, 'Input/3183.pickle') #Google Trends at city level
performCollection(False, 'Input/459.pickle') #Google Trends at country level
performCollection(False, 'Input/3183.pickle') #Google Trends at country level
    '''Google Trends does not always return the same number of cities.
    The following code examines the average/standard deviation of the number of cities returned.'''
if False:
averageAndStdDevAcrossAssociationsMadeByGoogle(True, 'Input/459.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(True, 'Input/3183.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(False, 'Input/459.pickle')
averageAndStdDevAcrossAssociationsMadeByGoogle(False, 'Input/3183.pickle')
step2 = True
if step2:
squaredWeight = True #This parameter raises the weight associated by Google via weight=weight*weight
filename = 'Input/459.pickle'
outputFilename = '459.csv'
assignRegion(True, filename, str(True)+outputFilename)
assignRegion(False, filename, str(False)+outputFilename)
filename = 'Input/3183.pickle'
outputFilename = '3183.csv'
assignRegion(True, filename, str(True)+outputFilename)
assignRegion(False, filename, str(False)+outputFilename)
    '''Google Trends at city resolution associates tokens with city locations.
    For each city, the city name and its coordinates are stored in the file "allCities.pickle".
    Next, each city name is sent to Google Trends and the top city result is used.
    For example, 'chicago' is sent and the top city result from Google Trends is returned.
    The coordinates of both the query city and the Google Trends city are known.
    These coordinates are used to compute the distance in miles.
    Over 4789 cities, the top city result from Google Trends is on average 362 miles away +/- 1335 miles.
    So Google Trends is not the same as geocoding, but for a query such as Moscow, Google is able to
    capture that this query is unlikely to be used by Russian speakers in Moscow, since they would
    likely use the Cyrillic version.
    The results of the comparison for each city are stored in: topCityAnalysis.csv'''
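    # A minimal sketch of the distance computation described above (coordinates are
    # illustrative examples, not values taken from the collected data):
    #   from geopy.distance import geodesic
    #   query_city = (41.8781, -87.6298)        # e.g. the Chicago query
    #   trend_city = (41.7508, -88.1535)        # e.g. a nearby top Google Trends city
    #   geodesic(query_city, trend_city).miles  # straight-line distance in miles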
step3 = False
if step3:
formCityList('Input/3183.pickle') #forms list of cities from Google Trend Associations, stores into "allCities.pickle"
performCollection(True, "allCities.pickle") #Google Trends at city level
compareQueryCityLocationVsTopTrendingCityLocation()
|
apanasyu/GoogleTrends
|
Main.py
|
Main.py
|
py
| 20,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41562683103
|
# It's a combination of two things: [merging and sorting]!
# Exploits the fact that arrays of 0 or 1 element are always sorted
# Works by decomposing an array into smaller arrays of 0 or 1 elements,
# then building up a newly sorted array
# STEPS
# 1. Divide the array, e.g. [1, 2, 4, 3] => [1, 2], [4, 3]
# 2. Divide the new arrays again: [1, 2] => [1], [2] && [4, 3] => [4], [3]
# 3. Join and sort the single items: [1], [2] => [1, 2] && [4], [3] => [3, 4]
# 4. Join and sort the arrays again: [1, 2] && [3, 4] => [1, 2, 3, 4]
# Best time complexity: O(n log n)
# Time complexity average: O(n log n)
# Time complexity worst: O(n log n)
# Space complexity: O(n)
# Notes: this algorithm does not care whether the data is already sorted
# or how long it is; there are no special edge cases
def merge(arrA, arrB):
    # Standard two-pointer merge of two already-sorted lists.
    i = 0
    j = 0
    result = []
    while i < len(arrA) and j < len(arrB):
        if arrA[i] <= arrB[j]:
            result.append(arrA[i])
            i += 1
        else:
            result.append(arrB[j])
            j += 1
    # At most one of the lists still has elements left; append the remainder.
    result.extend(arrA[i:])
    result.extend(arrB[j:])
    return result
def merge_sort(arr):
if len(arr) <= 1:
return arr
middle = len(arr) // 2
left_arr = merge_sort(arr[:middle])
    right_arr = merge_sort(arr[middle:])
    return merge(left_arr, right_arr)
print(merge_sort([10, 24, 76, 73, 199]))
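# A quick sanity check, mirroring the worked example from the comments above
# (duplicates and negative values are handled by the merge as well):
print(merge_sort([1, 2, 4, 3]))    # -> [1, 2, 3, 4]
print(merge_sort([5, -2, 5, 0]))   # -> [-2, 0, 5, 5]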
|
Wainercrb/data-structures
|
merge-sort/main.py
|
main.py
|
py
| 2,396 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25218471687
|
from django.db import models
from article.models import Article
from users.models import User
from ckeditor.fields import RichTextField
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
class Comment(MPTTModel):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
body = RichTextField()
created = models.DateTimeField(auto_now_add=True)
    # MPTT tree structure (parent comment for threaded replies)
parent = TreeForeignKey(
'self',
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children',
)
    # records which user a second-level comment replies to
reply_to = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name='replyers',
)
class MPTTMeta:
order_insertion_by = ['created']
def __str__(self):
return self.body[:20]
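# A minimal usage sketch (hypothetical objects), assuming standard Django + django-mptt usage:
#   reply = Comment.objects.create(
#       article=article, user=current_user, body="<p>Thanks!</p>",
#       parent=top_comment, reply_to=top_comment.user,
#   )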
|
MenGG6/personal-blog
|
comment/models.py
|
models.py
|
py
| 824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12646866981
|
from django import forms
from .models import Reservation, Testimonial
class ReservationForm(forms.ModelForm):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
email = forms.EmailField(label="Your Email", widget=forms.EmailInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Email'
}
))
reservation_date = forms.DateTimeField(label="Date & Time",
widget=forms.DateTimeInput(
attrs={
'class': 'form-control datetimepicker-input',
'id': 'datetime',
'placeholder': 'Date & Time',
'data-target': '#date3',
'data-toggle': 'datetimepicker'
}
))
people = forms.IntegerField(label="No Of People", widget=forms.NumberInput(
attrs={
'class': 'form-control',
'id': 'people',
'placeholder': 'No Of People'
}
))
request = forms.CharField(label="Special Request", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Special Request',
'style': 'height: 100px;'
}
))
class Meta:
model = Reservation
fields = (
'name',
'email',
'reservation_date',
'people',
'request'
)
class ContactForm(forms.Form):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
email = forms.EmailField(label="Your Email", widget=forms.EmailInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Email'
}
))
subject = forms.CharField(label='Subject', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'subject',
'placeholder': 'Subject'
}
))
message = forms.CharField(label="Message", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Leave a message here',
'style': 'height: 150px;'
}
))
class FeedbackForm(forms.ModelForm):
name = forms.CharField(label='Your Name', widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'name',
'placeholder': 'Your Name'
}
))
profession = forms.CharField(label="Your Profession", widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'email',
'placeholder': 'Your Profession'
}
))
feedback = forms.CharField(label="Feedback", widget=forms.Textarea(
attrs={
'class': 'form-control',
'id': 'message',
'placeholder': 'Feedback...',
'style': 'height: 150px;'
}
))
photo = forms.FileField(label='Photo', widget=forms.FileInput(
attrs={
'type': 'file',
'class': 'form-control',
'id': 'subject',
'placeholder': 'Photo'
}
))
class Meta:
model = Testimonial
fields = (
'name',
'profession',
'feedback',
'photo'
)
|
Dantes696/restaraunt
|
res/forms.py
|
forms.py
|
py
| 4,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|