code_tokens (stringlengths 60-3.94k) | label (int64 0-1) | doc (stringlengths 16-98) | docstring_tokens (stringlengths 0-1.41k) | idx (stringlengths 13-17) | code (stringlengths 87-6.4k) |
---|---|---|---|---|---|
def range ( self , chromosome , start , stop , exact = False ) : return self . _clone ( filters = [ GenomicFilter ( chromosome , start , stop , exact ) ] )
| 0 |
range object is not callable python filter
|
Shortcut to do range filters on genomic datasets .
|
cosqa-train-16500
|
def range(self, chromosome, start, stop, exact=False):
"""
Shortcut to do range filters on genomic datasets.
"""
return self._clone(
filters=[GenomicFilter(chromosome, start, stop, exact)])
|
def get_neg_infinity ( dtype ) : if issubclass ( dtype . type , ( np . floating , np . integer ) ) : return - np . inf if issubclass ( dtype . type , np . complexfloating ) : return - np . inf - 1j * np . inf return NINF
| 0 |
make a value of var infinity in python
|
Return an appropriate positive infinity for this dtype .
|
cosqa-train-16501
|
def get_neg_infinity(dtype):
"""Return an appropriate positive infinity for this dtype.
Parameters
----------
dtype : np.dtype
Returns
-------
fill_value : positive infinity value corresponding to this dtype.
"""
if issubclass(dtype.type, (np.floating, np.integer)):
return -np.inf
if issubclass(dtype.type, np.complexfloating):
return -np.inf - 1j * np.inf
return NINF
|
def open_json ( file_name ) : with open ( file_name , "r" ) as json_data : data = json . load ( json_data ) return data
| 0 |
read a json file and load python
|
returns json contents as string
|
cosqa-train-16502
|
def open_json(file_name):
"""
returns json contents as string
"""
with open(file_name, "r") as json_data:
data = json.load(json_data)
return data
|
def strToBool ( val ) : if isinstance ( val , str ) : val = val . lower ( ) return val in [ 'true' , 'on' , 'yes' , True ]
| 1 |
make boolean string in python
|
Helper function to turn a string representation of true into boolean True .
|
cosqa-train-16503
|
def strToBool(val):
"""
Helper function to turn a string representation of "true" into
boolean True.
"""
if isinstance(val, str):
val = val.lower()
return val in ['true', 'on', 'yes', True]
|
def be_array_from_bytes ( fmt , data ) : arr = array . array ( str ( fmt ) , data ) return fix_byteorder ( arr )
| 1 |
read binary data byte array python
|
Reads an array from bytestring with big - endian data .
|
cosqa-train-16504
|
def be_array_from_bytes(fmt, data):
"""
Reads an array from bytestring with big-endian data.
"""
arr = array.array(str(fmt), data)
return fix_byteorder(arr)
|
def parse_date ( s ) : try : return datetime . date ( int ( s [ : 4 ] ) , int ( s [ 5 : 7 ] ) , int ( s [ 8 : 10 ] ) ) except ValueError : # other accepted format used in one-day data set return datetime . datetime . strptime ( s , '%d %B %Y' ) . date ( )
| 0 |
make date vairable from string python
|
Fast %Y - %m - %d parsing .
|
cosqa-train-16505
|
def parse_date(s):
"""Fast %Y-%m-%d parsing."""
try:
return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:10]))
except ValueError: # other accepted format used in one-day data set
return datetime.datetime.strptime(s, '%d %B %Y').date()
|
def read_numpy ( fd , byte_order , dtype , count ) : return numpy . fromfile ( fd , byte_order + dtype [ - 1 ] , count )
| 1 |
read file into python with numpy
|
Read tag data from file and return as numpy array .
|
cosqa-train-16506
|
def read_numpy(fd, byte_order, dtype, count):
"""Read tag data from file and return as numpy array."""
return numpy.fromfile(fd, byte_order+dtype[-1], count)
|
def _clear ( self ) : draw = ImageDraw . Draw ( self . _background_image ) draw . rectangle ( self . _device . bounding_box , fill = "black" ) del draw
| 0 |
make image clear with python
|
Helper that clears the composition .
|
cosqa-train-16507
|
def _clear(self):
"""
Helper that clears the composition.
"""
draw = ImageDraw.Draw(self._background_image)
draw.rectangle(self._device.bounding_box,
fill="black")
del draw
|
def import_public_rsa_key_from_file ( filename ) : with open ( filename , "rb" ) as key_file : public_key = serialization . load_pem_public_key ( key_file . read ( ) , backend = default_backend ( ) ) return public_key
| 0 |
read private key rsa python
|
Read a public RSA key from a PEM file .
|
cosqa-train-16508
|
def import_public_rsa_key_from_file(filename):
"""
Read a public RSA key from a PEM file.
:param filename: The name of the file
:param passphrase: A pass phrase to use to unpack the PEM file.
:return: A
cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey instance
"""
with open(filename, "rb") as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend=default_backend())
return public_key
|
def mouse_move_event ( self , event ) : self . example . mouse_position_event ( event . x ( ) , event . y ( ) )
| 1 |
make mouse move in python
|
Forward mouse cursor position events to the example
|
cosqa-train-16509
|
def mouse_move_event(self, event):
"""
Forward mouse cursor position events to the example
"""
self.example.mouse_position_event(event.x(), event.y())
|
def get_as_bytes ( self , s3_path ) : ( bucket , key ) = self . _path_to_bucket_and_key ( s3_path ) obj = self . s3 . Object ( bucket , key ) contents = obj . get ( ) [ 'Body' ] . read ( ) return contents
| 0 |
read s3 object python
|
Get the contents of an object stored in S3 as bytes
|
cosqa-train-16510
|
def get_as_bytes(self, s3_path):
"""
Get the contents of an object stored in S3 as bytes
:param s3_path: URL for target S3 location
:return: File contents as pure bytes
"""
(bucket, key) = self._path_to_bucket_and_key(s3_path)
obj = self.s3.Object(bucket, key)
contents = obj.get()['Body'].read()
return contents
|
def load_data ( filename ) : data = pandas . read_csv ( filename , header = None , delimiter = '\t' , skiprows = 9 ) return data . as_matrix ( )
| 0 |
make python read csv and represent as matrix
|
: rtype : numpy matrix
|
cosqa-train-16511
|
def load_data(filename):
"""
:rtype : numpy matrix
"""
data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
return data.as_matrix()
|
def _readuntil ( f , end = _TYPE_END ) : buf = bytearray ( ) byte = f . read ( 1 ) while byte != end : if byte == b'' : raise ValueError ( 'File ended unexpectedly. Expected end byte {}.' . format ( end ) ) buf += byte byte = f . read ( 1 ) return buf
| 1 |
read until end of file bytes python
|
Helper function to read bytes until a certain end byte is hit
|
cosqa-train-16512
|
def _readuntil(f, end=_TYPE_END):
"""Helper function to read bytes until a certain end byte is hit"""
buf = bytearray()
byte = f.read(1)
while byte != end:
if byte == b'':
raise ValueError('File ended unexpectedly. Expected end byte {}.'.format(end))
buf += byte
byte = f.read(1)
return buf
|
def hash_producer ( * args , * * kwargs ) : return hashlib . md5 ( six . text_type ( uuid . uuid4 ( ) ) . encode ( 'utf-8' ) ) . hexdigest ( )
| 0 |
make python secret key urandom
|
Returns a random hash for a confirmation secret .
|
cosqa-train-16513
|
def hash_producer(*args, **kwargs):
""" Returns a random hash for a confirmation secret. """
return hashlib.md5(six.text_type(uuid.uuid4()).encode('utf-8')).hexdigest()
|
def load_yaml ( filepath ) : with open ( filepath ) as f : txt = f . read ( ) return yaml . load ( txt )
| 1 |
read yaml file in python
|
Convenience function for loading yaml - encoded data from disk .
|
cosqa-train-16514
|
def load_yaml(filepath):
"""Convenience function for loading yaml-encoded data from disk."""
with open(filepath) as f:
txt = f.read()
return yaml.load(txt)
|
def to_camel ( s ) : # r'(?!^)_([a-zA-Z]) original regex wasn't process first groups return re . sub ( r'_([a-zA-Z])' , lambda m : m . group ( 1 ) . upper ( ) , '_' + s )
| 0 |
make the first letter lower case python
|
: param string s : under_scored string to be CamelCased : return : CamelCase version of input : rtype : str
|
cosqa-train-16515
|
def to_camel(s):
"""
:param string s: under_scored string to be CamelCased
:return: CamelCase version of input
:rtype: str
"""
# r'(?!^)_([a-zA-Z]) original regex wasn't process first groups
return re.sub(r'_([a-zA-Z])', lambda m: m.group(1).upper(), '_' + s)
|
def read_dict_from_file ( file_path ) : with open ( file_path ) as file : lines = file . read ( ) . splitlines ( ) obj = { } for line in lines : key , value = line . split ( ':' , maxsplit = 1 ) obj [ key ] = eval ( value ) return obj
| 1 |
reading a text file into a dictionary python
|
Read a dictionary of strings from a file
|
cosqa-train-16516
|
def read_dict_from_file(file_path):
"""
Read a dictionary of strings from a file
"""
with open(file_path) as file:
lines = file.read().splitlines()
obj = {}
for line in lines:
key, value = line.split(':', maxsplit=1)
obj[key] = eval(value)
return obj
|
def get_translucent_cmap ( r , g , b ) : class TranslucentCmap ( BaseColormap ) : glsl_map = """
vec4 translucent_fire(float t) {{
return vec4({0}, {1}, {2}, t);
}}
""" . format ( r , g , b ) return TranslucentCmap ( )
| 0 |
make translucent color python
|
|
cosqa-train-16517
|
def get_translucent_cmap(r, g, b):
class TranslucentCmap(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {{
return vec4({0}, {1}, {2}, t);
}}
""".format(r, g, b)
return TranslucentCmap()
|
def get_jsonparsed_data ( url ) : response = urlopen ( url ) data = response . read ( ) . decode ( 'utf-8' ) return json . loads ( data )
| 0 |
reading json from url in python
|
Receive the content of url parse it as JSON and return the object .
|
cosqa-train-16518
|
def get_jsonparsed_data(url):
"""Receive the content of ``url``, parse it as JSON and return the
object.
"""
response = urlopen(url)
data = response.read().decode('utf-8')
return json.loads(data)
|
def reduce_freqs ( freqlist ) : allfreqs = np . zeros_like ( freqlist [ 0 ] ) for f in freqlist : allfreqs += f return allfreqs
| 0 |
makeing a frequency list python
|
Add up a list of freq counts to get the total counts .
|
cosqa-train-16519
|
def reduce_freqs(freqlist):
"""
Add up a list of freq counts to get the total counts.
"""
allfreqs = np.zeros_like(freqlist[0])
for f in freqlist:
allfreqs += f
return allfreqs
|
def __setitem__ ( self , field , value ) : return self . _client . hset ( self . key_prefix , field , self . _dumps ( value ) )
| 0 |
redis set key python dict
|
: see :: meth : RedisMap . __setitem__
|
cosqa-train-16520
|
def __setitem__(self, field, value):
""" :see::meth:RedisMap.__setitem__ """
return self._client.hset(self.key_prefix, field, self._dumps(value))
|
def to_snake_case ( text ) : s1 = re . sub ( '(.)([A-Z][a-z]+)' , r'\1_\2' , text ) return re . sub ( '([a-z0-9])([A-Z])' , r'\1_\2' , s1 ) . lower ( )
| 1 |
maker a string lowercase pythong
|
Convert to snake case .
|
cosqa-train-16521
|
def to_snake_case(text):
"""Convert to snake case.
:param str text:
:rtype: str
:return:
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
def get_from_human_key ( self , key ) : if key in self . _identifier_map : return self . _identifier_map [ key ] raise KeyError ( key )
| 1 |
referring to a key in python
|
Return the key ( aka database value ) of a human key ( aka Python identifier ) .
|
cosqa-train-16522
|
def get_from_human_key(self, key):
"""Return the key (aka database value) of a human key (aka Python identifier)."""
if key in self._identifier_map:
return self._identifier_map[key]
raise KeyError(key)
|
def retrieve_import_alias_mapping ( names_list ) : import_alias_names = dict ( ) for alias in names_list : if alias . asname : import_alias_names [ alias . asname ] = alias . name return import_alias_names
| 1 |
making an alias list in python
|
Creates a dictionary mapping aliases to their respective name . import_alias_names is used in module_definitions . py and visit_Call
|
cosqa-train-16523
|
def retrieve_import_alias_mapping(names_list):
"""Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call"""
import_alias_names = dict()
for alias in names_list:
if alias.asname:
import_alias_names[alias.asname] = alias.name
return import_alias_names
|
def get_unixtime_registered ( self ) : doc = self . _request ( self . ws_prefix + ".getInfo" , True ) return int ( doc . getElementsByTagName ( "registered" ) [ 0 ] . getAttribute ( "unixtime" ) )
| 0 |
registration time stored in python
|
Returns the user s registration date as a UNIX timestamp .
|
cosqa-train-16524
|
def get_unixtime_registered(self):
"""Returns the user's registration date as a UNIX timestamp."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return int(doc.getElementsByTagName("registered")[0].getAttribute("unixtime"))
|
def heappush_max ( heap , item ) : heap . append ( item ) _siftdown_max ( heap , 0 , len ( heap ) - 1 )
| 0 |
max heap with double link list python
|
Push item onto heap maintaining the heap invariant .
|
cosqa-train-16525
|
def heappush_max(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown_max(heap, 0, len(heap) - 1)
|
def split_comma_argument ( comma_sep_str ) : terms = [ ] for term in comma_sep_str . split ( ',' ) : if term : terms . append ( term ) return terms
| 1 |
regula expression for specific string in comma separated strings python
|
Split a comma separated option into a list .
|
cosqa-train-16526
|
def split_comma_argument(comma_sep_str):
"""Split a comma separated option into a list."""
terms = []
for term in comma_sep_str.split(','):
if term:
terms.append(term)
return terms
|
def _multiline_width ( multiline_s , line_width_fn = len ) : return max ( map ( line_width_fn , re . split ( "[\r\n]" , multiline_s ) ) )
| 0 |
maximum number of characters per line in python
|
Visible width of a potentially multiline content .
|
cosqa-train-16527
|
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
|
def input_dir ( self ) : return os . path . abspath ( os . path . dirname ( self . inputs [ 'job_ini' ] ) )
| 0 |
relative path to config file in python
|
: returns : absolute path to where the job . ini is
|
cosqa-train-16528
|
def input_dir(self):
"""
:returns: absolute path to where the job.ini is
"""
return os.path.abspath(os.path.dirname(self.inputs['job_ini']))
|
def shader_string ( body , glsl_version = '450 core' ) : line_count = len ( body . split ( '\n' ) ) line_number = inspect . currentframe ( ) . f_back . f_lineno + 1 - line_count return """\
#version %s
%s
""" % ( glsl_version , shader_substring ( body , stack_frame = 2 ) )
| 1 |
maya python create shader
|
Call this method from a function that defines a literal shader string as the body argument . Dresses up a shader string in three ways : 1 ) Insert #version at the top 2 ) Insert #line number declaration 3 ) un - indents The line number information can help debug glsl compile errors . The version string needs to be the very first characters in the shader which can be distracting requiring backslashes or other tricks . The unindenting allows you to type the shader code at a pleasing indent level in your python method while still creating an unindented GLSL string at the end .
|
cosqa-train-16529
|
def shader_string(body, glsl_version='450 core'):
"""
Call this method from a function that defines a literal shader string as the "body" argument.
Dresses up a shader string in three ways:
1) Insert #version at the top
2) Insert #line number declaration
3) un-indents
The line number information can help debug glsl compile errors.
The version string needs to be the very first characters in the shader,
which can be distracting, requiring backslashes or other tricks.
The unindenting allows you to type the shader code at a pleasing indent level
in your python method, while still creating an unindented GLSL string at the end.
"""
line_count = len(body.split('\n'))
line_number = inspect.currentframe().f_back.f_lineno + 1 - line_count
return """\
#version %s
%s
""" % (glsl_version, shader_substring(body, stack_frame=2))
|
def softplus ( attrs , inputs , proto_obj ) : new_attrs = translation_utils . _add_extra_attributes ( attrs , { 'act_type' : 'softrelu' } ) return 'Activation' , new_attrs , inputs
| 1 |
relu activation function python
|
Applies the sofplus activation function element - wise to the input .
|
cosqa-train-16530
|
def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs
|
def get_file_md5sum ( path ) : with open ( path , 'rb' ) as fh : h = str ( hashlib . md5 ( fh . read ( ) ) . hexdigest ( ) ) return h
| 1 |
md5 value of a file python
|
Calculate the MD5 hash for a file .
|
cosqa-train-16531
|
def get_file_md5sum(path):
"""Calculate the MD5 hash for a file."""
with open(path, 'rb') as fh:
h = str(hashlib.md5(fh.read()).hexdigest())
return h
|
def get_remote_content ( filepath ) : with hide ( 'running' ) : temp = BytesIO ( ) get ( filepath , temp ) content = temp . getvalue ( ) . decode ( 'utf-8' ) return content . strip ( )
| 0 |
remotely access file in python
|
A handy wrapper to get a remote file content
|
cosqa-train-16532
|
def get_remote_content(filepath):
""" A handy wrapper to get a remote file content """
with hide('running'):
temp = BytesIO()
get(filepath, temp)
content = temp.getvalue().decode('utf-8')
return content.strip()
|
def __len__ ( self ) : length = 0 for typ , siz , _ in self . format : length += siz return length
| 0 |
measure length of object in python
|
This will equal 124 for the V1 database .
|
cosqa-train-16533
|
def __len__(self):
""" This will equal 124 for the V1 database. """
length = 0
for typ, siz, _ in self.format:
length += siz
return length
|
def remove_bad ( string ) : remove = [ ':' , ',' , '(' , ')' , ' ' , '|' , ';' , '\'' ] for c in remove : string = string . replace ( c , '_' ) return string
| 0 |
remove all blanks from list python
|
remove problem characters from string
|
cosqa-train-16534
|
def remove_bad(string):
"""
remove problem characters from string
"""
remove = [':', ',', '(', ')', ' ', '|', ';', '\'']
for c in remove:
string = string.replace(c, '_')
return string
|
def array_bytes ( array ) : return np . product ( array . shape ) * np . dtype ( array . dtype ) . itemsize
| 1 |
memory size of an array python
|
Estimates the memory of the supplied array in bytes
|
cosqa-train-16535
|
def array_bytes(array):
""" Estimates the memory of the supplied array in bytes """
return np.product(array.shape)*np.dtype(array.dtype).itemsize
|
def unique ( seq ) : cleaned = [ ] for each in seq : if each not in cleaned : cleaned . append ( each ) return cleaned
| 0 |
remove all empty elements from list python
|
Return the unique elements of a collection even if those elements are unhashable and unsortable like dicts and sets
|
cosqa-train-16536
|
def unique(seq):
"""Return the unique elements of a collection even if those elements are
unhashable and unsortable, like dicts and sets"""
cleaned = []
for each in seq:
if each not in cleaned:
cleaned.append(each)
return cleaned
|
def cross_join ( df1 , df2 ) : if len ( df1 ) == 0 : return df2 if len ( df2 ) == 0 : return df1 # Add as lists so that the new index keeps the items in # the order that they are added together all_columns = pd . Index ( list ( df1 . columns ) + list ( df2 . columns ) ) df1 [ 'key' ] = 1 df2 [ 'key' ] = 1 return pd . merge ( df1 , df2 , on = 'key' ) . loc [ : , all_columns ]
| 0 |
merge data frames with 2 columns and inner join in python
|
Return a dataframe that is a cross between dataframes df1 and df2
|
cosqa-train-16537
|
def cross_join(df1, df2):
"""
Return a dataframe that is a cross between dataframes
df1 and df2
ref: https://github.com/pydata/pandas/issues/5401
"""
if len(df1) == 0:
return df2
if len(df2) == 0:
return df1
# Add as lists so that the new index keeps the items in
# the order that they are added together
all_columns = pd.Index(list(df1.columns) + list(df2.columns))
df1['key'] = 1
df2['key'] = 1
return pd.merge(df1, df2, on='key').loc[:, all_columns]
|
def strip_accents ( string ) : return u'' . join ( ( character for character in unicodedata . normalize ( 'NFD' , string ) if unicodedata . category ( character ) != 'Mn' ) )
| 0 |
remove all non alphabet characters python
|
Strip all the accents from the string
|
cosqa-train-16538
|
def strip_accents(string):
"""
Strip all the accents from the string
"""
return u''.join(
(character for character in unicodedata.normalize('NFD', string)
if unicodedata.category(character) != 'Mn'))
|
def IsBinary ( self , filename ) : mimetype = mimetypes . guess_type ( filename ) [ 0 ] if not mimetype : return False # e.g. README, "real" binaries usually have an extension # special case for text files which don't start with text/ if mimetype in TEXT_MIMETYPES : return False return not mimetype . startswith ( "text/" )
| 1 |
mimetext is not defined python examples
|
Returns true if the guessed mimetyped isnt t in text group .
|
cosqa-train-16539
|
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
|
def remove_punctuation ( text , exceptions = [ ] ) : all_but = [ r'\w' , r'\s' ] all_but . extend ( exceptions ) pattern = '[^{}]' . format ( '' . join ( all_but ) ) return re . sub ( pattern , '' , text )
| 1 |
remove all punctuations from text python
|
Return a string with punctuation removed .
|
cosqa-train-16540
|
def remove_punctuation(text, exceptions=[]):
"""
Return a string with punctuation removed.
Parameters:
text (str): The text to remove punctuation from.
exceptions (list): List of symbols to keep in the given text.
Return:
str: The input text without the punctuation.
"""
all_but = [
r'\w',
r'\s'
]
all_but.extend(exceptions)
pattern = '[^{}]'.format(''.join(all_but))
return re.sub(pattern, '', text)
|
def SegmentMin ( a , ids ) : func = lambda idxs : np . amin ( a [ idxs ] , axis = 0 ) return seg_map ( func , a , ids ) ,
| 0 |
min function on an array python
|
Segmented min op .
|
cosqa-train-16541
|
def SegmentMin(a, ids):
"""
Segmented min op.
"""
func = lambda idxs: np.amin(a[idxs], axis=0)
return seg_map(func, a, ids),
|
def remove_bad ( string ) : remove = [ ':' , ',' , '(' , ')' , ' ' , '|' , ';' , '\'' ] for c in remove : string = string . replace ( c , '_' ) return string
| 0 |
remove blanks from the list python
|
remove problem characters from string
|
cosqa-train-16542
|
def remove_bad(string):
"""
remove problem characters from string
"""
remove = [':', ',', '(', ')', ' ', '|', ';', '\'']
for c in remove:
string = string.replace(c, '_')
return string
|
def copy_and_update ( dictionary , update ) : newdict = dictionary . copy ( ) newdict . update ( update ) return newdict
| 0 |
modifying values of a dictionary python
|
Returns an updated copy of the dictionary without modifying the original
|
cosqa-train-16543
|
def copy_and_update(dictionary, update):
"""Returns an updated copy of the dictionary without modifying the original"""
newdict = dictionary.copy()
newdict.update(update)
return newdict
|
def slugify ( string ) : string = re . sub ( '[^\w .-]' , '' , string ) string = string . replace ( " " , "-" ) return string
| 0 |
remove comma in a string python
|
Removes non - alpha characters and converts spaces to hyphens . Useful for making file names . Source : http : // stackoverflow . com / questions / 5574042 / string - slugification - in - python
|
cosqa-train-16544
|
def slugify(string):
"""
Removes non-alpha characters, and converts spaces to hyphens. Useful for making file names.
Source: http://stackoverflow.com/questions/5574042/string-slugification-in-python
"""
string = re.sub('[^\w .-]', '', string)
string = string.replace(" ", "-")
return string
|
def strip_spaces ( value , sep = None , join = True ) : value = value . strip ( ) value = [ v . strip ( ) for v in value . split ( sep ) ] join_sep = sep or ' ' return join_sep . join ( value ) if join else value
| 0 |
remove commas seperating characters in list python
|
Cleans trailing whitespaces and replaces also multiple whitespaces with a single space .
|
cosqa-train-16545
|
def strip_spaces(value, sep=None, join=True):
"""Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
value = value.strip()
value = [v.strip() for v in value.split(sep)]
join_sep = sep or ' '
return join_sep.join(value) if join else value
|
def mostCommonItem ( lst ) : # This elegant solution from: http://stackoverflow.com/a/1518632/1760218 lst = [ l for l in lst if l ] if lst : return max ( set ( lst ) , key = lst . count ) else : return None
| 0 |
most common element in a list python
|
Choose the most common item from the list or the first item if all items are unique .
|
cosqa-train-16546
|
def mostCommonItem(lst):
"""Choose the most common item from the list, or the first item if all
items are unique."""
# This elegant solution from: http://stackoverflow.com/a/1518632/1760218
lst = [l for l in lst if l]
if lst:
return max(set(lst), key=lst.count)
else:
return None
|
def __normalize_list ( self , msg ) : if isinstance ( msg , list ) : msg = "" . join ( msg ) return list ( map ( lambda x : x . strip ( ) , msg . split ( "," ) ) )
| 0 |
remove commas string python list
|
Split message to list by commas and trim whitespace .
|
cosqa-train-16547
|
def __normalize_list(self, msg):
"""Split message to list by commas and trim whitespace."""
if isinstance(msg, list):
msg = "".join(msg)
return list(map(lambda x: x.strip(), msg.split(",")))
|
def mostCommonItem ( lst ) : # This elegant solution from: http://stackoverflow.com/a/1518632/1760218 lst = [ l for l in lst if l ] if lst : return max ( set ( lst ) , key = lst . count ) else : return None
| 0 |
most common element in list python
|
Choose the most common item from the list or the first item if all items are unique .
|
cosqa-train-16548
|
def mostCommonItem(lst):
"""Choose the most common item from the list, or the first item if all
items are unique."""
# This elegant solution from: http://stackoverflow.com/a/1518632/1760218
lst = [l for l in lst if l]
if lst:
return max(set(lst), key=lst.count)
else:
return None
|
def dump_nparray ( self , obj , class_name = numpy_ndarray_class_name ) : return { "$" + class_name : self . _json_convert ( obj . tolist ( ) ) }
| 1 |
remove dtype nparray python
|
numpy . ndarray dumper .
|
cosqa-train-16549
|
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name):
"""
``numpy.ndarray`` dumper.
"""
return {"$" + class_name: self._json_convert(obj.tolist())}
|
def moving_average ( array , n = 3 ) : ret = _np . cumsum ( array , dtype = float ) ret [ n : ] = ret [ n : ] - ret [ : - n ] return ret [ n - 1 : ] / n
| 1 |
moving average array python
|
Calculates the moving average of an array .
|
cosqa-train-16550
|
def moving_average(array, n=3):
"""
Calculates the moving average of an array.
Parameters
----------
array : array
The array to have the moving average taken of
n : int
The number of points of moving average to take
Returns
-------
MovingAverageArray : array
The n-point moving average of the input array
"""
ret = _np.cumsum(array, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
|
def remove_dups ( seq ) : seen = set ( ) seen_add = seen . add return [ x for x in seq if not ( x in seen or seen_add ( x ) ) ]
| 1 |
remove duplicates without changing porder python
|
remove duplicates from a sequence preserving order
|
cosqa-train-16551
|
def remove_dups(seq):
"""remove duplicates from a sequence, preserving order"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
|
def reduce_multiline ( string ) : string = str ( string ) return " " . join ( [ item . strip ( ) for item in string . split ( "\n" ) if item . strip ( ) ] )
| 0 |
multiline strings without newline python
|
reduces a multiline string to a single line of text .
|
cosqa-train-16552
|
def reduce_multiline(string):
"""
reduces a multiline string to a single line of text.
args:
string: the text to reduce
"""
string = str(string)
return " ".join([item.strip()
for item in string.split("\n")
if item.strip()])
|
def get_element_with_id ( self , id ) : # Should we maintain a hashmap of ids to make this more efficient? Probably overkill. # TODO: Elements can contain nested elements (captions, footnotes, table cells, etc.) return next ( ( el for el in self . elements if el . id == id ) , None )
| 0 |
remove element by id in python
|
Return the element with the specified ID .
|
cosqa-train-16553
|
def get_element_with_id(self, id):
"""Return the element with the specified ID."""
# Should we maintain a hashmap of ids to make this more efficient? Probably overkill.
# TODO: Elements can contain nested elements (captions, footnotes, table cells, etc.)
return next((el for el in self.elements if el.id == id), None)
|
def remove_series ( self , series ) : if len ( self . all_series ( ) ) == 1 : raise ValueError ( "Cannot remove last series from %s" % str ( self ) ) self . _all_series . remove ( series ) series . _chart = None
| 0 |
remove element from series python
|
Removes a : py : class : . Series from the chart .
|
cosqa-train-16554
|
def remove_series(self, series):
"""Removes a :py:class:`.Series` from the chart.
:param Series series: The :py:class:`.Series` to remove.
:raises ValueError: if you try to remove the last\
:py:class:`.Series`."""
if len(self.all_series()) == 1:
raise ValueError("Cannot remove last series from %s" % str(self))
self._all_series.remove(series)
series._chart = None
|
def matrixTimesVector ( MM , aa ) : bb = np . zeros ( 3 , np . float ) for ii in range ( 3 ) : bb [ ii ] = np . sum ( MM [ ii , : ] * aa ) return bb
| 0 |
multiply each element of a matrix python
|
|
cosqa-train-16555
|
def matrixTimesVector(MM, aa):
"""
:param MM: A matrix of size 3x3
:param aa: A vector of size 3
:return: A vector of size 3 which is the product of the matrix by the vector
"""
bb = np.zeros(3, np.float)
for ii in range(3):
bb[ii] = np.sum(MM[ii, :] * aa)
return bb
|
def unique ( _list ) : ret = [ ] for item in _list : if item not in ret : ret . append ( item ) return ret
| 1 |
remove empty element in list python
|
Makes the list have unique items only and maintains the order
|
cosqa-train-16556
|
def unique(_list):
"""
Makes the list have unique items only and maintains the order
list(set()) won't provide that
:type _list list
:rtype: list
"""
ret = []
for item in _list:
if item not in ret:
ret.append(item)
return ret
|
def multiply ( self , number ) : return self . from_list ( [ x * number for x in self . to_list ( ) ] )
| 0 |
multiply in python using built in function
|
Return a Vector as the product of the vector and a real number .
|
cosqa-train-16557
|
def multiply(self, number):
"""Return a Vector as the product of the vector and a real number."""
return self.from_list([x * number for x in self.to_list()])
|
def clean_py_files ( path ) : for dirname , subdirlist , filelist in os . walk ( path ) : for f in filelist : if f . endswith ( 'py' ) : os . remove ( os . path . join ( dirname , f ) )
| 1 |
remove files in directory python
|
Removes all . py files .
|
cosqa-train-16558
|
def clean_py_files(path):
"""
Removes all .py files.
:param path: the path
:return: None
"""
for dirname, subdirlist, filelist in os.walk(path):
for f in filelist:
if f.endswith('py'):
os.remove(os.path.join(dirname, f))
|
def validate_string_list ( lst ) : if not isinstance ( lst , list ) : raise ValueError ( 'input %r must be a list' % lst ) for x in lst : if not isinstance ( x , basestring ) : raise ValueError ( 'element %r in list must be a string' % x )
| 0 |
must be str not list python
|
Validate that the input is a list of strings .
|
cosqa-train-16559
|
def validate_string_list(lst):
"""Validate that the input is a list of strings.
Raises ValueError if not."""
if not isinstance(lst, list):
raise ValueError('input %r must be a list' % lst)
for x in lst:
if not isinstance(x, basestring):
raise ValueError('element %r in list must be a string' % x)
|
def strip_accents ( text ) : normalized_str = unicodedata . normalize ( 'NFD' , text ) return '' . join ( [ c for c in normalized_str if unicodedata . category ( c ) != 'Mn' ] )
| 0 |
remove letters from string python for loop
|
Strip agents from a string .
|
cosqa-train-16560
|
def strip_accents(text):
"""
Strip agents from a string.
"""
normalized_str = unicodedata.normalize('NFD', text)
return ''.join([
c for c in normalized_str if unicodedata.category(c) != 'Mn'])
|
def close ( self ) : if self . db is not None : self . db . commit ( ) self . db . close ( ) self . db = None return
| 1 |
mysql python how to close the connection
|
Close the db and release memory
|
cosqa-train-16561
|
def close( self ):
"""
Close the db and release memory
"""
if self.db is not None:
self.db.commit()
self.db.close()
self.db = None
return
|
def normalize_value ( text ) : result = text . replace ( '\n' , ' ' ) result = re . subn ( '[ ]{2,}' , ' ' , result ) [ 0 ] return result
| 1 |
remove multiple \n from text python
|
This removes newlines and multiple spaces from a string .
|
cosqa-train-16562
|
def normalize_value(text):
"""
This removes newlines and multiple spaces from a string.
"""
result = text.replace('\n', ' ')
result = re.subn('[ ]{2,}', ' ', result)[0]
return result
|
def get_url_file_name ( url ) : assert isinstance ( url , ( str , _oldstr ) ) return urlparse . urlparse ( url ) . path . split ( '/' ) [ - 1 ]
| 0 |
name a file from url python
|
Get the file name from an url Parameters ---------- url : str
|
cosqa-train-16563
|
def get_url_file_name(url):
"""Get the file name from an url
Parameters
----------
url : str
Returns
-------
str
The file name
"""
assert isinstance(url, (str, _oldstr))
return urlparse.urlparse(url).path.split('/')[-1]
|
def strip_accents ( s ) : nfkd = unicodedata . normalize ( 'NFKD' , unicode ( s ) ) return u'' . join ( ch for ch in nfkd if not unicodedata . combining ( ch ) )
| 0 |
remove not letters python
|
Strip accents to prepare for slugification .
|
cosqa-train-16564
|
def strip_accents(s):
"""
Strip accents to prepare for slugification.
"""
nfkd = unicodedata.normalize('NFKD', unicode(s))
return u''.join(ch for ch in nfkd if not unicodedata.combining(ch))
|
def dump_nparray ( self , obj , class_name = numpy_ndarray_class_name ) : return { "$" + class_name : self . _json_convert ( obj . tolist ( ) ) }
| 0 |
ndarray object definition python
|
numpy . ndarray dumper .
|
cosqa-train-16565
|
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name):
"""
``numpy.ndarray`` dumper.
"""
return {"$" + class_name: self._json_convert(obj.tolist())}
|
def drop_empty ( rows ) : return zip ( * [ col for col in zip ( * rows ) if bool ( filter ( bool , col [ 1 : ] ) ) ] )
| 0 |
remove row with empty cell python
|
Transpose the columns into rows remove all of the rows that are empty after the first cell then transpose back . The result is that columns that have a header but no data in the body are removed assuming the header is the first row .
|
cosqa-train-16566
|
def drop_empty(rows):
"""Transpose the columns into rows, remove all of the rows that are empty after the first cell, then
transpose back. The result is that columns that have a header but no data in the body are removed, assuming
the header is the first row. """
return zip(*[col for col in zip(*rows) if bool(filter(bool, col[1:]))])
|
def find_nearest_index ( arr , value ) : arr = np . array ( arr ) index = ( abs ( arr - value ) ) . argmin ( ) return index
| 0 |
nearest index value array python
|
For a given value the function finds the nearest value in the array and returns its index .
|
cosqa-train-16567
|
def find_nearest_index(arr, value):
"""For a given value, the function finds the nearest value
in the array and returns its index."""
arr = np.array(arr)
index = (abs(arr-value)).argmin()
return index
|
def rm_keys_from_dict ( d , keys ) : # Loop for each key given for key in keys : # Is the key in the dictionary? if key in d : try : d . pop ( key , None ) except KeyError : # Not concerned with an error. Keep going. pass return d
| 0 |
remove something from a dictionary python
|
Given a dictionary and a key list remove any data in the dictionary with the given keys .
|
cosqa-train-16568
|
def rm_keys_from_dict(d, keys):
"""
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
"""
# Loop for each key given
for key in keys:
# Is the key in the dictionary?
if key in d:
try:
d.pop(key, None)
except KeyError:
# Not concerned with an error. Keep going.
pass
return d
|
def index_nearest ( value , array ) : a = ( array - value ) ** 2 return index ( a . min ( ) , a )
| 0 |
nearest integer numpy array python
|
expects a _n . array returns the global minimum of ( value - array ) ^2
|
cosqa-train-16569
|
def index_nearest(value, array):
"""
expects a _n.array
returns the global minimum of (value-array)^2
"""
a = (array-value)**2
return index(a.min(), a)
|
def clean ( s ) : lines = [ l . rstrip ( ) for l in s . split ( '\n' ) ] return '\n' . join ( lines )
| 1 |
remove trailing whitespace in python
|
Removes trailing whitespace on each line .
|
cosqa-train-16570
|
def clean(s):
"""Removes trailing whitespace on each line."""
lines = [l.rstrip() for l in s.split('\n')]
return '\n'.join(lines)
|
def software_fibonacci ( n ) : a , b = 0 , 1 for i in range ( n ) : a , b = b , a + b return a
| 1 |
new fibonacci function in python
|
a normal old python function to return the Nth fibonacci number .
|
cosqa-train-16571
|
def software_fibonacci(n):
""" a normal old python function to return the Nth fibonacci number. """
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return a
|
def _breakRemNewlines ( tag ) : for i , c in enumerate ( tag . contents ) : if type ( c ) != bs4 . element . NavigableString : continue c . replace_with ( re . sub ( r' {2,}' , ' ' , c ) . replace ( '\n' , '' ) )
| 0 |
remove whitespace after xml closing tag python
|
non - recursively break spaces and remove newlines in the tag
|
cosqa-train-16572
|
def _breakRemNewlines(tag):
"""non-recursively break spaces and remove newlines in the tag"""
for i,c in enumerate(tag.contents):
if type(c) != bs4.element.NavigableString:
continue
c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
|
def forget_xy ( t ) : shape = ( t . shape [ 0 ] , None , None , t . shape [ 3 ] ) return tf . placeholder_with_default ( t , shape )
| 0 |
no gradients provided for any variable tf python help
|
Ignore sizes of dimensions ( 1 2 ) of a 4d tensor in shape inference .
|
cosqa-train-16573
|
def forget_xy(t):
"""Ignore sizes of dimensions (1, 2) of a 4d tensor in shape inference.
This allows using smaller input sizes, which create an invalid graph at higher
layers (for example because a spatial dimension becomes smaller than a conv
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
return tf.placeholder_with_default(t, shape)
|
def get_dimension_array ( array ) : if all ( isinstance ( el , list ) for el in array ) : result = [ len ( array ) , len ( max ( [ x for x in array ] , key = len , ) ) ] # elif array and isinstance(array, list): else : result = [ len ( array ) , 1 ] return result
| 0 |
no of dimensions in python list
|
Get dimension of an array getting the number of rows and the max num of columns .
|
cosqa-train-16574
|
def get_dimension_array(array):
"""
Get dimension of an array getting the number of rows and the max num of
columns.
"""
if all(isinstance(el, list) for el in array):
result = [len(array), len(max([x for x in array], key=len,))]
# elif array and isinstance(array, list):
else:
result = [len(array), 1]
return result
|
def _remove_blank ( l ) : ret = [ ] for i , _ in enumerate ( l ) : if l [ i ] == 0 : break ret . append ( l [ i ] ) return ret
| 1 |
remove zeros from list python
|
Removes trailing zeros in the list of integers and returns a new list of integers
|
cosqa-train-16575
|
def _remove_blank(l):
""" Removes trailing zeros in the list of integers and returns a new list of integers"""
ret = []
for i, _ in enumerate(l):
if l[i] == 0:
break
ret.append(l[i])
return ret
|
async def async_input ( prompt ) : print ( prompt , end = '' , flush = True ) return ( await loop . run_in_executor ( None , sys . stdin . readline ) ) . rstrip ( )
| 0 |
non blocking input python
|
Python s input () is blocking which means the event loop we set above can t be running while we re blocking there . This method will let the loop run while we wait for input .
|
cosqa-train-16576
|
async def async_input(prompt):
"""
Python's ``input()`` is blocking, which means the event loop we set
above can't be running while we're blocking there. This method will
let the loop run while we wait for input.
"""
print(prompt, end='', flush=True)
return (await loop.run_in_executor(None, sys.stdin.readline)).rstrip()
|
def sanitize_word ( s ) : s = re . sub ( '[^\w-]+' , '_' , s ) s = re . sub ( '__+' , '_' , s ) return s . strip ( '_' )
| 1 |
removing a symbol from a string in python
|
Remove non - alphanumerical characters from metric word . And trim excessive underscores .
|
cosqa-train-16577
|
def sanitize_word(s):
"""Remove non-alphanumerical characters from metric word.
And trim excessive underscores.
"""
s = re.sub('[^\w-]+', '_', s)
s = re.sub('__+', '_', s)
return s.strip('_')
|
def cell_normalize ( data ) : if sparse . issparse ( data ) : data = sparse . csc_matrix ( data . astype ( float ) ) # normalize in-place sparse_cell_normalize ( data . data , data . indices , data . indptr , data . shape [ 1 ] , data . shape [ 0 ] ) return data data_norm = data . astype ( float ) total_umis = [ ] for i in range ( data . shape [ 1 ] ) : di = data_norm [ : , i ] total_umis . append ( di . sum ( ) ) di /= total_umis [ i ] med = np . median ( total_umis ) data_norm *= med return data_norm
| 1 |
normalize a sparse matrix python
|
Returns the data where the expression is normalized so that the total count per cell is equal .
|
cosqa-train-16578
|
def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm
|
def _str_to_list ( s ) : _list = s . split ( "," ) return list ( map ( lambda i : i . lstrip ( ) , _list ) )
| 1 |
removing items separated by comma from a list python
|
Converts a comma separated string to a list
|
cosqa-train-16579
|
def _str_to_list(s):
"""Converts a comma separated string to a list"""
_list = s.split(",")
return list(map(lambda i: i.lstrip(), _list))
|
def phase_correct_first ( spec , freq , k ) : c_factor = np . exp ( - 1j * k * freq ) c_factor = c_factor . reshape ( ( len ( spec . shape ) - 1 ) * ( 1 , ) + c_factor . shape ) return spec * c_factor
| 1 |
npss signal auto correction python
|
First order phase correction .
|
cosqa-train-16580
|
def phase_correct_first(spec, freq, k):
"""
First order phase correction.
Parameters
----------
spec : float array
The spectrum to be corrected.
freq : float array
The frequency axis.
k : float
The slope of the phase correction as a function of frequency.
Returns
-------
The phase-corrected spectrum.
Notes
-----
[Keeler2005] Keeler, J (2005). Understanding NMR Spectroscopy, 2nd
edition. Wiley. Page 88
"""
c_factor = np.exp(-1j * k * freq)
c_factor = c_factor.reshape((len(spec.shape) -1) * (1,) + c_factor.shape)
return spec * c_factor
|
def wordify ( text ) : stopset = set ( nltk . corpus . stopwords . words ( 'english' ) ) tokens = nltk . WordPunctTokenizer ( ) . tokenize ( text ) return [ w for w in tokens if w not in stopset ]
| 0 |
removing stop words in python without ntlk
|
Generate a list of words given text removing punctuation .
|
cosqa-train-16581
|
def wordify(text):
"""Generate a list of words given text, removing punctuation.
Parameters
----------
text : unicode
A piece of english text.
Returns
-------
words : list
List of words.
"""
stopset = set(nltk.corpus.stopwords.words('english'))
tokens = nltk.WordPunctTokenizer().tokenize(text)
return [w for w in tokens if w not in stopset]
|
def series_table_row_offset ( self , series ) : title_and_spacer_rows = series . index * 2 data_point_rows = series . data_point_offset return title_and_spacer_rows + data_point_rows
| 0 |
number of rows in a series python
|
Return the number of rows preceding the data table for * series * in the Excel worksheet .
|
cosqa-train-16582
|
def series_table_row_offset(self, series):
"""
Return the number of rows preceding the data table for *series* in
the Excel worksheet.
"""
title_and_spacer_rows = series.index * 2
data_point_rows = series.data_point_offset
return title_and_spacer_rows + data_point_rows
|
def myreplace ( astr , thefind , thereplace ) : alist = astr . split ( thefind ) new_s = alist . split ( thereplace ) return new_s
| 1 |
replace * in string in python
|
in string astr replace all occurences of thefind with thereplace
|
cosqa-train-16583
|
def myreplace(astr, thefind, thereplace):
"""in string astr replace all occurences of thefind with thereplace"""
alist = astr.split(thefind)
new_s = alist.split(thereplace)
return new_s
|
def series_table_row_offset ( self , series ) : title_and_spacer_rows = series . index * 2 data_point_rows = series . data_point_offset return title_and_spacer_rows + data_point_rows
| 0 |
number of rows in a sheet python
|
Return the number of rows preceding the data table for * series * in the Excel worksheet .
|
cosqa-train-16584
|
def series_table_row_offset(self, series):
"""
Return the number of rows preceding the data table for *series* in
the Excel worksheet.
"""
title_and_spacer_rows = series.index * 2
data_point_rows = series.data_point_offset
return title_and_spacer_rows + data_point_rows
|
def subn_filter ( s , find , replace , count = 0 ) : return re . gsub ( find , replace , count , s )
| 0 |
replace all occurrence of a searched pattern in python
|
A non - optimal implementation of a regex filter
|
cosqa-train-16585
|
def subn_filter(s, find, replace, count=0):
"""A non-optimal implementation of a regex filter"""
return re.gsub(find, replace, count, s)
|
def count_list ( the_list ) : count = the_list . count result = [ ( item , count ( item ) ) for item in set ( the_list ) ] result . sort ( ) return result
| 1 |
number of unique values in list python
|
Generates a count of the number of times each unique item appears in a list
|
cosqa-train-16586
|
def count_list(the_list):
"""
Generates a count of the number of times each unique item appears in a list
"""
count = the_list.count
result = [(item, count(item)) for item in set(the_list)]
result.sort()
return result
|
def clean_with_zeros ( self , x ) : x [ ~ np . any ( np . isnan ( x ) | np . isinf ( x ) , axis = 1 ) ] = 0 return x
| 0 |
replace all zeros in array with nan python
|
set nan and inf rows from x to zero
|
cosqa-train-16587
|
def clean_with_zeros(self,x):
""" set nan and inf rows from x to zero"""
x[~np.any(np.isnan(x) | np.isinf(x),axis=1)] = 0
return x
|
def read_array ( path , mmap_mode = None ) : file_ext = op . splitext ( path ) [ 1 ] if file_ext == '.npy' : return np . load ( path , mmap_mode = mmap_mode ) raise NotImplementedError ( "The file extension `{}` " . format ( file_ext ) + "is not currently supported." )
| 0 |
numpy load from file in folder python
|
Read a . npy array .
|
cosqa-train-16588
|
def read_array(path, mmap_mode=None):
"""Read a .npy array."""
file_ext = op.splitext(path)[1]
if file_ext == '.npy':
return np.load(path, mmap_mode=mmap_mode)
raise NotImplementedError("The file extension `{}` ".format(file_ext) +
"is not currently supported.")
|
def escape_tex ( value ) : newval = value for pattern , replacement in LATEX_SUBS : newval = pattern . sub ( replacement , newval ) return newval
| 1 |
replace latex with python
|
Make text tex safe
|
cosqa-train-16589
|
def escape_tex(value):
"""
Make text tex safe
"""
newval = value
for pattern, replacement in LATEX_SUBS:
newval = pattern.sub(replacement, newval)
return newval
|
def index_nearest ( value , array ) : a = ( array - value ) ** 2 return index ( a . min ( ) , a )
| 1 |
numpy nearest pointin python
|
expects a _n . array returns the global minimum of ( value - array ) ^2
|
cosqa-train-16590
|
def index_nearest(value, array):
"""
expects a _n.array
returns the global minimum of (value-array)^2
"""
a = (array-value)**2
return index(a.min(), a)
|
def _sub_patterns ( patterns , text ) : for pattern , repl in patterns : text = re . sub ( pattern , repl , text ) return text
| 0 |
replace multiple patterns using re python
|
Apply re . sub to bunch of ( pattern repl )
|
cosqa-train-16591
|
def _sub_patterns(patterns, text):
"""
Apply re.sub to bunch of (pattern, repl)
"""
for pattern, repl in patterns:
text = re.sub(pattern, repl, text)
return text
|
def as_list ( self ) : return [ self . name , self . value , [ x . as_list for x in self . children ] ]
| 0 |
object as a list python
|
Return all child objects in nested lists of strings .
|
cosqa-train-16592
|
def as_list(self):
"""Return all child objects in nested lists of strings."""
return [self.name, self.value, [x.as_list for x in self.children]]
|
def unapostrophe ( text ) : text = re . sub ( r'[%s]s?$' % '' . join ( APOSTROPHES ) , '' , text ) return text
| 0 |
replace parantheses in string python
|
Strip apostrophe and s from the end of a string .
|
cosqa-train-16593
|
def unapostrophe(text):
"""Strip apostrophe and 's' from the end of a string."""
text = re.sub(r'[%s]s?$' % ''.join(APOSTROPHES), '', text)
return text
|
def as_list ( self ) : return [ self . name , self . value , [ x . as_list for x in self . children ] ]
| 1 |
object as list python
|
Return all child objects in nested lists of strings .
|
cosqa-train-16594
|
def as_list(self):
"""Return all child objects in nested lists of strings."""
return [self.name, self.value, [x.as_list for x in self.children]]
|
def parse_querystring ( self , req , name , field ) : return core . get_value ( req . args , name , field )
| 0 |
request param in python django
|
Pull a querystring value from the request .
|
cosqa-train-16595
|
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.args, name, field)
|
def is_serializable ( obj ) : if inspect . isclass ( obj ) : return Serializable . is_serializable_type ( obj ) return isinstance ( obj , Serializable ) or hasattr ( obj , '_asdict' )
| 1 |
object of type 'listserializer' is not json serializable python
|
Return True if the given object conforms to the Serializable protocol .
|
cosqa-train-16596
|
def is_serializable(obj):
"""Return `True` if the given object conforms to the Serializable protocol.
:rtype: bool
"""
if inspect.isclass(obj):
return Serializable.is_serializable_type(obj)
return isinstance(obj, Serializable) or hasattr(obj, '_asdict')
|
def device_state ( device_id ) : if device_id not in devices : return jsonify ( success = False ) return jsonify ( state = devices [ device_id ] . state )
| 1 |
requests python json expecting value
|
Get device state via HTTP GET .
|
cosqa-train-16597
|
def device_state(device_id):
""" Get device state via HTTP GET. """
if device_id not in devices:
return jsonify(success=False)
return jsonify(state=devices[device_id].state)
|
def get_readonly_fields ( self , request , obj = None ) : return list ( self . readonly_fields ) + [ field . name for field in obj . _meta . fields ]
| 1 |
odoo+python+add read only field
|
Set all fields readonly .
|
cosqa-train-16598
|
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj._meta.fields]
|
def resample ( grid , wl , flux ) : flux_rs = ( interpolate . interp1d ( wl , flux ) ) ( grid ) return flux_rs
| 0 |
resample 2d lon lat grid python
|
Resample spectrum onto desired grid
|
cosqa-train-16599
|
def resample(grid, wl, flux):
""" Resample spectrum onto desired grid """
flux_rs = (interpolate.interp1d(wl, flux))(grid)
return flux_rs
|