| repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 distinct values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 distinct values) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool) |
|---|---|---|---|---|---|---|---|---|---|---|
| 
	ahmadio/edx-platform | 
	lms/djangoapps/mobile_api/social_facebook/groups/views.py | 
	86 | 
	4938 | 
	"""
Views for groups info API
"""
from rest_framework import generics, status, mixins
from rest_framework.response import Response
from django.conf import settings
import facebook
from ...utils import mobile_view
from . import serializers
@mobile_view()
class Groups(generics.CreateAPIView, mixins.DestroyModelMixin):
    """
    **Use Case**
        An API to Create or Delete course groups.
        Note: The Delete is not invoked from the current version of the app
        and is used only for testing with facebook dependencies.
    **Creation Example request**:
        POST /api/mobile/v0.5/social/facebook/groups/
        Parameters: name : string,
                    description : string,
                    privacy : open/closed
    **Creation Response Values**
        {"id": group_id}
    **Deletion Example request**:
        DELETE /api/mobile/v0.5/social/facebook/groups/<group_id>
    **Deletion Response Values**
        {"success" : "true"}
    """
    serializer_class = serializers.GroupSerializer
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.DATA, files=request.FILES)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        try:
            app_groups_response = facebook_graph_api().request(
                settings.FACEBOOK_API_VERSION + '/' + settings.FACEBOOK_APP_ID + "/groups",
                post_args=request.POST.dict()
            )
            return Response(app_groups_response)
        except facebook.GraphAPIError, ex:
            return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Deletes the course group.
        """
        try:
            return Response(
                facebook_graph_api().request(
                    settings.FACEBOOK_API_VERSION + '/' + settings.FACEBOOK_APP_ID + "/groups/" + kwargs['group_id'],
                    post_args={'method': 'delete'}
                )
            )
        except facebook.GraphAPIError, ex:
            return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)
@mobile_view()
class GroupsMembers(generics.CreateAPIView, mixins.DestroyModelMixin):
    """
    **Use Case**
        An API to Invite members to, and Remove members from, a group
        Note: The Remove is not invoked from the current version
        of the app and is used only for testing with facebook dependencies.
    **Invite Example request**:
        POST /api/mobile/v0.5/social/facebook/groups/<group_id>/member/
        Parameters: members : int,int,int...
    **Invite Response Values**
        {"member_id" : success/error_message}
        A response with each member_id and whether or not the member was added successfully.
        If the member was not added successfully the Facebook error message is provided.
    **Remove Example request**:
        DELETE /api/mobile/v0.5/social/facebook/groups/<group_id>/member/<member_id>
    **Remove Response Values**
        {"success" : "true"}
    """
    serializer_class = serializers.GroupsMembersSerializer
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.DATA, files=request.FILES)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        graph = facebook_graph_api()
        url = settings.FACEBOOK_API_VERSION + '/' + kwargs['group_id'] + "/members"
        member_ids = serializer.object['member_ids'].split(',')
        response = {}
        for member_id in member_ids:
            try:
                if 'success' in graph.request(url, post_args={'member': member_id}):
                    response[member_id] = 'success'
            except facebook.GraphAPIError, ex:
                response[member_id] = ex.result['error']['message']
        return Response(response, status=status.HTTP_200_OK)
    def delete(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Deletes the member from the course group.
        """
        try:
            return Response(
                facebook_graph_api().request(
                    settings.FACEBOOK_API_VERSION + '/' + kwargs['group_id'] + "/members",
                    post_args={'method': 'delete', 'member': kwargs['member_id']}
                )
            )
        except facebook.GraphAPIError, ex:
            return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)
def facebook_graph_api():
    """
    Returns the result from calling Facebook's Graph API with the app's access token.
    """
    return facebook.GraphAPI(facebook.get_app_access_token(settings.FACEBOOK_APP_ID, settings.FACEBOOK_APP_SECRET))
 | 
	agpl-3.0 | -5,134,061,850,296,026,000 | 33.531469 | 117 | 0.614824 | false | 
| 
	johnsonc/OTM2 | 
	opentreemap/treemap/tests/ui/uitest_registration_views.py | 
	12 | 
	2858 | 
	# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from time import sleep
from django.core.urlresolvers import reverse
from django.core import mail
from registration.models import RegistrationProfile
from treemap.tests.ui import UITestCase
from treemap.tests import make_user, create_mock_system_user
class LoginLogoutTest(UITestCase):
    def setUp(self):
        create_mock_system_user()
        super(LoginLogoutTest, self).setUp()
        self.user = make_user(username='username', password='password')
        self.profile = RegistrationProfile.objects.create_profile(self.user)
    def test_invalid_login(self):
        self.browse_to_url(reverse('auth_login'))
        login_url = self.driver.current_url
        self.process_login_form(
            self.user.username, 'passwordinvalid')
        # There should be an error list with at least one element
        self.wait_until_present('.errorlist li')
        # We should be on the same page
        self.assertEqual(login_url, self.driver.current_url)
    def test_valid_login(self):
        self.browse_to_url(reverse('auth_login'))
        login_url = self.driver.current_url
        self.process_login_form(self.user.username, 'password')
        email_element = self.wait_until_present(
            '[data-field="user.email"][data-class="display"]')
        # We should not be on the same page
        self.assertNotEqual(login_url, self.driver.current_url)
        # We should expect our username in the url
        self.assertIn(self.user.username, self.driver.current_url)
        value = email_element.get_attribute('data-value')
        self.assertEqual(self.user.email, value)
        sleep(1)  # prevent hang
class ForgotUsernameTest(UITestCase):
    def setUp(self):
        create_mock_system_user()
        super(ForgotUsernameTest, self).setUp()
        self.user = make_user(username='username', password='password')
    def tearDown(self):
        mail.outbox = []
        super(ForgotUsernameTest, self).tearDown()
    def test_can_get_to_page(self):
        self.browse_to_url(reverse('auth_login'))
        forgot_username_url = reverse('forgot_username')
        link = self.find_anchor_by_url(forgot_username_url)
        link.click()
        self.wait_until_present('input[name="email"]')
        self.assertEqual(self.live_server_url + forgot_username_url,
                         self.driver.current_url)
    def test_can_retrieve_username(self):
        self.browse_to_url(reverse('forgot_username'))
        email_elem = self.driver.find_element_by_name('email')
        email_elem.send_keys(self.user.email)
        self.click('form input[type="submit"]')
        self.wait_until_text_present('Email Sent')
        self.assertEqual(len(mail.outbox), 1)
 | 
	gpl-3.0 | 4,118,574,749,859,539,500 | 29.731183 | 76 | 0.659902 | false | 
| 
	catb0t/simpleforth | 
	forth.py | 
	1 | 
	7114 | 
	#!/usr/bin/env python3
import builtins
from pmlr import pmlr
debug_write = pmlr.util.debug_write
ERR_DATA = {
    ZeroDivisionError:  {"IS_FATAL": False, "TYPE": "DEBUG"},
    LookupError:        {"IS_FATAL": False, "TYPE": "RANGE"},
    IndexError:         {"IS_FATAL": False, "TYPE": "RANGE"},
    TypeError:          {"IS_FATAL": True,  "TYPE": "ERROR"},
    NameError:          {"IS_FATAL": True,  "TYPE": "FATAL"},
    ValueError:         {"IS_FATAL": True,  "TYPE": "FATAL"},
    AssertionError:     {"IS_FATAL": True,  "TYPE": "FATAL"},
}
def is_none(*args):
    return None in args
def cmp_all(val, *tests):
    return builtins.all([val == test for test in tests])
def all(*args):
    return builtins.all(args)
def any(*args):
    return builtins.any(args)
class Forth(object):
    def __init__(self):
        (self._stk, self._lopstk,
            self._retstk, self._sftstk) = [Stack() for i in range(4)]
        self.dict = {
            "": ()
        }
        self.funcdict = {
            "": ()
        }
    def run(self, prog, sandbox=False):
        pass
    def define(self, name, defn):
        defn = " ".join(defn).strip()
        try:
            self.run(defn, sandbox=True)
        except MalformedExpressionException as err:
            debug_write(err.msg, level=err.level)
        return None  # {"name": "None", "desc": "debug"}
class OpCore():
    """bare stack operator mixin"""
    def peek(self, from_idx=0, to_idx=-1):
        return self._stk[:]
    def pop(self, count=1, idx=-1):
        """( x -- )
        take something and return it"""
        if count > len(self._stk):
            pmlr.util.debug_write(
                "popping more items than exist on stack!\n",
                level="WARN"
            )
        # http://stackoverflow.com/a/34633242/4532996
        # from testing it seems that pop(x) is slower than pop()
        # pop(-1) doesn't seem to be optimised to pop(),
        # so avoid it if possible
        x = []
        if -1 == idx:
            for i in range(count):
                try:
                    x.append(self._stk.pop())
                except LookupError as err:
                    self.err(err, errtype="RANGE")
                    break
        else:
            for i in range(count):
                try:
                    x.append(self._stk.pop(idx))
                except LookupError as err:
                    self.err(err, errtype="RANGE")
                    break
        return x[0] if len(x) == 1 else list(reversed(x))
    def push(self, *args, idx=-1):
        """( -- x ... )
        put somethings at idx"""
        if idx == -1:
            self._stk.extend(args)
        else:
            if idx < 0:
                for arg in args:
                    self._stk.insert(idx, arg)
                    idx -= 1
            else:
                for arg in args:
                    self._stk.insert(idx, arg)
                    idx += 1
    def clear(self):
        """( z y x -- )
        clear the stack completely"""
        y = self._stk.copy()
        self._stk.clear()
        return y
    def pick(self, idx=-3, drop=False):
        """( x -- x )
        pick somethings from a range of indices"""
        s = self._stk[idx]
        if drop: self._stk[idx] = []
        return s
    def drop(self, count=1, idx=-1):
        """( x -- )
        drop items without returning (cheaper pop)"""
        [self.pop(idx=idx) for i in range(count)]
    def dup(self, count=1, from_idx=-1):
        """( y -- y y )
        duplicate something and push"""
        try:
            y = [self._stk[from_idx]] * count
        except LookupError as err:
            self.err(err, errtype="RANGE")
            return None
        self.push(*y, idx=from_idx)
    def dupn(self, count=2, idx=-1):
        """( x y -- x y x y )
        dup count items from an idx"""
        y = []
        for i in range(count):
            try:
                y.append(self._stk[idx - i])
            except LookupError as err:
                if idx == 1:
                    continue
                else:
                    self.err(err, errtype="RANGE")
                    return None
        self.push(*y, idx=idx)
    def swap(self, idx=-1):
        """( x y -- y x )
        swap two things at an index"""
        self.push(*reversed([self.pop(idx=idx) for i in range(2)]), idx=idx)
    def rot(self, idx=-1, count=3):
        """( w x y z -- x y z w )
        rotate things left, at an index"""
        l = [self.pop(idx=idx) for i in range(count)]
        l.insert(0, l.pop())
        self.push(*l, idx=idx)
    def urot(self, idx=-1, count=3):
        """( w x y z -- z w x y )
        rotate things right, at an index"""
        l = [self.pop(idx=idx) for i in range(count)]
        l.append(l.pop(0))
        self.push(*l, idx=idx)
class OpLogik():
    pass
class OpString():
    pass
class Stack(OpCore, OpLogik, OpString):
    "the mixin mixer of the above mixins"
    def __init__(self):
        self._stk = []
    def err(self, err, errtype=None, framelevel=3):
        if errtype is None:
            errtype = ERR_DATA.get(err.__class__, {"TYPE": "FATAL"})["TYPE"]
        errtype = errtype.upper()
        debug_write(*err.args, "\n", level=errtype, framelevel=framelevel)
        if ERR_DATA.get(err.__class__, {"IS_FATAL": True})["IS_FATAL"]:
            raise err.__class__(
                pmlr.util.debug_fmt(
                    errtype, framelevel=framelevel
                ) + " " + "".join([str(i) for i in err.args])
            )
    def __repr__(self):
        return "<{}> {}".format(len(self._stk), _fmt_collection(self._stk))
is_collection = lambda c: any(issubclass(c.__class__, (list, tuple, dict, set)), isinstance(c, (list, tuple, dict, set)))
def _fmt_collection(col):
    "format a collection literal"
    t_super = col.__class__
    try:
        t_mro  = t_super.mro()
        t_meta = t_mro[1]
        if cmp_all(type(t_meta), object, type, type(object), type(type)): raise TypeError
    except (NameError, TypeError, IndexError, AttributeError) as err:
        if cmp_all(err.__class__, NameError, AttributeError) and not hasattr(t_super, "mro"): raise
        else: raise TypeError("need object instance but found {} (class constructor, type or object object)".format(type(col)))
    is_iter      = hasattr(col, "__iter__")
    is_meta_iter = hasattr(col.__class__, "__iter__")
    if not any(is_iter, is_meta_iter):
        raise TypeError("({}) {} object is not iterable".format(col, col.__class__))
    orderedary = (list, tuple, set)
    if any(isinstance(col, orderedary), issubclass(col.__class__, orderedary)):
        return "[ {} ]".format(" ".join(repr(i) if not is_collection(i) else _fmt_collection(i) for i in col))
    elif any(isinstance(col, dict), issubclass(col.__class__, dict)):
        return " ".join("{}:{}".format(str(key), str(value)) for key, value in col.items())
    else:
        raise TypeError("don't know how to format that container")
    return locals()
if __name__ == "__main__":
    from tests import main as test_main
    test_main() | 
	gpl-3.0 | 8,776,950,339,713,067,000 | 27.805668 | 127 | 0.50984 | false | 
| 
	skidzen/grit-i18n | 
	grit/tool/build.py | 
	2 | 
	19603 | 
	#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit build' tool along with integration for this tool with the
SCons build system.
'''
import filecmp
import getopt
import os
import shutil
import sys
from grit import grd_reader
from grit import util
from grit.tool import interface
from grit import shortcuts
# It would be cleaner to have each module register itself, but that would
# require importing all of them on every run of GRIT.
'''Map from <output> node types to modules under grit.format.'''
_format_modules = {
  'android':                  'android_xml',
  'c_format':                 'c_format',
  'chrome_messages_json':     'chrome_messages_json',
  'data_package':             'data_pack',
  'js_map_format':            'js_map_format',
  'rc_all':                   'rc',
  'rc_translateable':         'rc',
  'rc_nontranslateable':      'rc',
  'rc_header':                'rc_header',
  'resource_map_header':      'resource_map',
  'resource_map_source':      'resource_map',
  'resource_file_map_source': 'resource_map',
}
_format_modules.update(
    (type, 'policy_templates.template_formatter') for type in
        [ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',
          'plist', 'plist_strings', 'ios_plist', 'android_policy' ])
def GetFormatter(type):
  modulename = 'grit.format.' + _format_modules[type]
  __import__(modulename)
  module = sys.modules[modulename]
  try:
    return module.Format
  except AttributeError:
    return module.GetFormatter(type)
class RcBuilder(interface.Tool):
  '''A tool that builds RC files and resource header files for compilation.
Usage:  grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*
All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).
Options:
  -a FILE           Assert that the given file is an output. There can be
                    multiple "-a" flags listed for multiple outputs. If a "-a"
                    or "--assert-file-list" argument is present, then the list
                    of asserted files must match the output files or the tool
                    will fail. The use-case is for the build system to maintain
                    separate lists of output files and to catch errors if the
                    build system's list and the grit list are out-of-sync.
  --assert-file-list  Provide a file listing multiple asserted output files.
                    There is one file name per line. This acts like specifying
                    each file with "-a" on the command line, but without the
                    possibility of running into OS line-length limits for very
                    long lists.
  -o OUTPUTDIR      Specify what directory output paths are relative to.
                    Defaults to the current directory.
  -D NAME[=VAL]     Specify a C-preprocessor-like define NAME with optional
                    value VAL (defaults to 1) which will be used to control
                    conditional inclusion of resources.
  -E NAME=VALUE     Set environment variable NAME to VALUE (within grit).
  -f FIRSTIDSFILE   Path to a python file that specifies the first id of
                    value to use for resources.  A non-empty value here will
                    override the value specified in the <grit> node's
                    first_ids_file.
  -w WHITELISTFILE  Path to a file containing the string names of the
                    resources to include.  Anything not listed is dropped.
  -t PLATFORM       Specifies the platform the build is targeting; defaults
                    to the value of sys.platform. The value provided via this
                    flag should match what sys.platform would report for your
                    target platform; see grit.node.base.EvaluateCondition.
  -h HEADERFORMAT   Custom format string to use for generating rc header files.
                    The string should have two placeholders: {textual_id}
                    and {numeric_id}. E.g. "#define {textual_id} {numeric_id}"
                    Otherwise it will use the default "#define SYMBOL 1234"
  --output-all-resource-defines
  --no-output-all-resource-defines  If specified, overrides the value of the
                    output_all_resource_defines attribute of the root <grit>
                    element of the input .grd file.
  --write-only-new flag
                    If flag is non-0, write output files to a temporary file
                    first, and copy it to the real output only if the new file
                    is different from the old file.  This allows some build
                    systems to realize that dependent build steps might be
                    unnecessary, at the cost of comparing the output data at
                    grit time.
  --depend-on-stamp
                    If specified along with --depfile and --depdir, the depfile
                    generated will depend on a stampfile instead of the first
                    output in the input .grd file.
Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs).  This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''
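  # An illustrative invocation (hypothetical file and directory names),
  # combining the global input option with a few of the flags documented above:
  #
  #   grit -i resources.grd build -o out/grit -D enable_feature_x \
  #       -E root_gen_dir=out/gen -t win32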
  def ShortDescription(self):
    return 'A tool that builds RC files for compilation.'
  def Run(self, opts, args):
    self.output_directory = '.'
    first_ids_file = None
    whitelist_filenames = []
    assert_output_files = []
    target_platform = None
    depfile = None
    depdir = None
    rc_header_format = None
    output_all_resource_defines = None
    write_only_new = False
    depend_on_stamp = False
    (own_opts, args) = getopt.getopt(args, 'a:o:D:E:f:w:t:h:',
        ('depdir=','depfile=','assert-file-list=',
         'output-all-resource-defines',
         'no-output-all-resource-defines',
         'depend-on-stamp',
         'write-only-new='))
    for (key, val) in own_opts:
      if key == '-a':
        assert_output_files.append(val)
      elif key == '--assert-file-list':
        with open(val) as f:
          assert_output_files += f.read().splitlines()
      elif key == '-o':
        self.output_directory = val
      elif key == '-D':
        name, val = util.ParseDefine(val)
        self.defines[name] = val
      elif key == '-E':
        (env_name, env_value) = val.split('=', 1)
        os.environ[env_name] = env_value
      elif key == '-f':
        # TODO([email protected]): Remove this override once change
        # lands in WebKit.grd to specify the first_ids_file in the
        # .grd itself.
        first_ids_file = val
      elif key == '-w':
        whitelist_filenames.append(val)
      elif key == '--output-all-resource-defines':
        output_all_resource_defines = True
      elif key == '--no-output-all-resource-defines':
        output_all_resource_defines = False
      elif key == '-t':
        target_platform = val
      elif key == '-h':
        rc_header_format = val
      elif key == '--depdir':
        depdir = val
      elif key == '--depfile':
        depfile = val
      elif key == '--write-only-new':
        write_only_new = val != '0'
      elif key == '--depend-on-stamp':
        depend_on_stamp = True
    if len(args):
      print 'This tool takes no tool-specific arguments.'
      return 2
    self.SetOptions(opts)
    if self.scons_targets:
      self.VerboseOut('Using SCons targets to identify files to output.\n')
    else:
      self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
                      (self.output_directory,
                       os.path.abspath(self.output_directory)))
    if whitelist_filenames:
      self.whitelist_names = set()
      for whitelist_filename in whitelist_filenames:
        self.VerboseOut('Using whitelist: %s\n' % whitelist_filename);
        whitelist_contents = util.ReadFile(whitelist_filename, util.RAW_TEXT)
        self.whitelist_names.update(whitelist_contents.strip().split('\n'))
    self.write_only_new = write_only_new
    self.res = grd_reader.Parse(opts.input,
                                debug=opts.extra_verbose,
                                first_ids_file=first_ids_file,
                                defines=self.defines,
                                target_platform=target_platform)
    # If the output_all_resource_defines option is specified, override the value
    # found in the grd file.
    if output_all_resource_defines is not None:
      self.res.SetShouldOutputAllResourceDefines(output_all_resource_defines)
    # Set an output context so that conditionals can use defines during the
    # gathering stage; we use a dummy language here since we are not outputting
    # a specific language.
    self.res.SetOutputLanguage('en')
    if rc_header_format:
      self.res.AssignRcHeaderFormat(rc_header_format)
    self.res.RunGatherers()
    self.Process()
    if assert_output_files:
      if not self.CheckAssertedOutputFiles(assert_output_files):
        return 2
    if depfile and depdir:
      self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)
    return 0
  def __init__(self, defines=None):
    # Default file-creation function is built-in open().  Only done to allow
    # overriding by unit test.
    self.fo_create = open
    # key/value pairs of C-preprocessor like defines that are used for
    # conditional output of resources
    self.defines = defines or {}
    # self.res is a fully-populated resource tree if Run()
    # has been called, otherwise None.
    self.res = None
    # Set to a list of filenames for the output nodes that are relative
    # to the current working directory.  They are in the same order as the
    # output nodes in the file.
    self.scons_targets = None
    # The set of names that are whitelisted to actually be included in the
    # output.
    self.whitelist_names = None
    # Whether to compare outputs to their old contents before writing.
    self.write_only_new = False
  @staticmethod
  def AddWhitelistTags(start_node, whitelist_names):
    # Walk the tree of nodes, adding attributes to the nodes that shouldn't
    # be written into the target files (skip markers).
    from grit.node import include
    from grit.node import message
    from grit.node import structure
    for node in start_node:
      # Same trick data_pack.py uses to see what nodes actually result in
      # real items.
      if (isinstance(node, include.IncludeNode) or
          isinstance(node, message.MessageNode) or
          isinstance(node, structure.StructureNode)):
        text_ids = node.GetTextualIds()
        # Mark the item to be skipped if it wasn't in the whitelist.
        if text_ids and text_ids[0] not in whitelist_names:
          node.SetWhitelistMarkedAsSkip(True)
  @staticmethod
  def ProcessNode(node, output_node, outfile):
    '''Processes a node in-order, calling its formatter before and after
    recursing to its children.
    Args:
      node: grit.node.base.Node subclass
      output_node: grit.node.io.OutputNode
      outfile: open filehandle
    '''
    base_dir = util.dirname(output_node.GetOutputFilename())
    formatter = GetFormatter(output_node.GetType())
    formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
    outfile.writelines(formatted)
  def Process(self):
    # Update filenames with those provided by SCons if we're being invoked
    # from SCons.  The list of SCons targets also includes all <structure>
    # node outputs, but it starts with our output files, in the order they
    # occur in the .grd
    if self.scons_targets:
      assert len(self.scons_targets) >= len(self.res.GetOutputFiles())
      outfiles = self.res.GetOutputFiles()
      for ix in range(len(outfiles)):
        outfiles[ix].output_filename = os.path.abspath(
          self.scons_targets[ix])
    else:
      for output in self.res.GetOutputFiles():
        output.output_filename = os.path.abspath(os.path.join(
          self.output_directory, output.GetFilename()))
    # If there are whitelisted names, tag the tree once up front, this way
    # while looping through the actual output, it is just an attribute check.
    if self.whitelist_names:
      self.AddWhitelistTags(self.res, self.whitelist_names)
    for output in self.res.GetOutputFiles():
      self.VerboseOut('Creating %s...' % output.GetFilename())
      # Microsoft's RC compiler can only deal with single-byte or double-byte
      # files (no UTF-8), so we make all RC files UTF-16 to support all
      # character sets.
      if output.GetType() in ('rc_header', 'resource_map_header',
          'resource_map_source', 'resource_file_map_source'):
        encoding = 'cp1252'
      elif output.GetType() in ('android', 'c_format', 'js_map_format', 'plist',
                                'plist_strings', 'doc', 'json', 'android_policy'):
        encoding = 'utf_8'
      elif output.GetType() in ('chrome_messages_json'):
        # Chrome Web Store currently expects BOM for UTF-8 files :-(
        encoding = 'utf-8-sig'
      else:
        # TODO(gfeher) modify here to set utf-8 encoding for admx/adml
        encoding = 'utf_16'
      # Set the context, for conditional inclusion of resources
      self.res.SetOutputLanguage(output.GetLanguage())
      self.res.SetOutputContext(output.GetContext())
      self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())
      self.res.SetDefines(self.defines)
      # Make the output directory if it doesn't exist.
      self.MakeDirectoriesTo(output.GetOutputFilename())
      # Write the results to a temporary file and only overwrite the original
      # if the file changed.  This avoids unnecessary rebuilds.
      outfile = self.fo_create(output.GetOutputFilename() + '.tmp', 'wb')
      if output.GetType() != 'data_package':
        outfile = util.WrapOutputStream(outfile, encoding)
      # Iterate in-order through entire resource tree, calling formatters on
      # the entry into a node and on exit out of it.
      with outfile:
        self.ProcessNode(self.res, output, outfile)
      # Now copy from the temp file back to the real output, but on Windows,
      # only if the real output doesn't exist or the contents of the file
      # changed.  This prevents identical headers from being written and .cc
      # files from recompiling (which is painful on Windows).
      if not os.path.exists(output.GetOutputFilename()):
        os.rename(output.GetOutputFilename() + '.tmp',
                  output.GetOutputFilename())
      else:
        # CHROMIUM SPECIFIC CHANGE.
        # This clashes with gyp + vstudio, which expect the output timestamp
        # to change on a rebuild, even if nothing has changed, so only do
        # it when opted in.
        if not self.write_only_new:
          write_file = True
        else:
          files_match = filecmp.cmp(output.GetOutputFilename(),
              output.GetOutputFilename() + '.tmp')
          write_file = not files_match
        if write_file:
          shutil.copy2(output.GetOutputFilename() + '.tmp',
                       output.GetOutputFilename())
        os.remove(output.GetOutputFilename() + '.tmp')
      self.VerboseOut(' done.\n')
    # Print warnings if there are any duplicate shortcuts.
    warnings = shortcuts.GenerateDuplicateShortcutsWarnings(
        self.res.UberClique(), self.res.GetTcProject())
    if warnings:
      print '\n'.join(warnings)
    # Print out any fallback warnings, and missing translation errors, and
    # exit with an error code if there are missing translations in a non-pseudo
    # and non-official build.
    warnings = (self.res.UberClique().MissingTranslationsReport().
        encode('ascii', 'replace'))
    if warnings:
      self.VerboseOut(warnings)
    if self.res.UberClique().HasMissingTranslations():
      print self.res.UberClique().missing_translations_
      sys.exit(-1)
  def CheckAssertedOutputFiles(self, assert_output_files):
    '''Checks that the asserted output files are specified in the given list.
    Returns true if the asserted files are present. If they are not, returns
    False and prints the failure.
    '''
    # Compare the absolute path names, sorted.
    asserted = sorted([os.path.abspath(i) for i in assert_output_files])
    actual = sorted([
        os.path.abspath(os.path.join(self.output_directory, i.GetFilename()))
        for i in self.res.GetOutputFiles()])
    if asserted != actual:
      missing = list(set(actual) - set(asserted))
      extra = list(set(asserted) - set(actual))
      error = '''Asserted file list does not match.
Expected output files:
%s
Actual output files:
%s
Missing output files:
%s
Extra output files:
%s
'''
      print error % ('\n'.join(asserted), '\n'.join(actual), '\n'.join(missing),
          '\n'.join(extra))
      return False
    return True
  def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):
    '''Generate a depfile that contains the implicit dependencies of the input
    grd. The depfile will be in the same format as a makefile, and will contain
    references to files relative to |depdir|. It will be put in |depfile|.
    For example, supposing we have three files in a directory src/
    src/
      blah.grd    <- depends on input{1,2}.xtb
      input1.xtb
      input2.xtb
    and we run
      grit -i blah.grd -o ../out/gen --depdir ../out --depfile ../out/gen/blah.grd.d
    from the directory src/ we will generate a depfile ../out/gen/blah.grd.d
    that has the contents
      gen/blah.h: ../src/input1.xtb ../src/input2.xtb
    Where "gen/blah.h" is the first output (Ninja expects the .d file to list
    the first output in cases where there is more than one). If the flag
    --depend-on-stamp is specified, "gen/blah.grd.d.stamp" will be used instead;
    it is 'touched' whenever a new depfile is generated.
    Note that all paths in the depfile are relative to ../out, the depdir.
    '''
    depfile = os.path.abspath(depfile)
    depdir = os.path.abspath(depdir)
    infiles = self.res.GetInputFiles()
    # We want to trigger a rebuild if the first ids change.
    if first_ids_file is not None:
      infiles.append(first_ids_file)
    if (depend_on_stamp):
      output_file = depfile + ".stamp"
      # Touch the stamp file before generating the depfile.
      with open(output_file, 'a'):
        os.utime(output_file, None)
    else:
      # Get the first output file relative to the depdir.
      outputs = self.res.GetOutputFiles()
      output_file = os.path.join(self.output_directory,
                                 outputs[0].GetFilename())
    output_file = os.path.relpath(output_file, depdir)
    # The path prefix to prepend to dependencies in the depfile.
    prefix = os.path.relpath(os.getcwd(), depdir)
    deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])
    depfile_contents = output_file + ': ' + deps_text
    self.MakeDirectoriesTo(depfile)
    outfile = self.fo_create(depfile, 'wb')
    outfile.writelines(depfile_contents)
  @staticmethod
  def MakeDirectoriesTo(file):
    '''Creates directories necessary to contain |file|.'''
    dir = os.path.split(file)[0]
    if not os.path.exists(dir):
      os.makedirs(dir)
 | 
	bsd-2-clause | -7,293,954,927,933,790,000 | 38.363454 | 83 | 0.644442 | false | 
| 
	minosg/piblinker | 
	piblinker.py | 
	1 | 
	14790 | 
	#!/usr/bin/env python
"""blinky.py: A small library that uses wiriping pi access to raspbery pi GPIO
   ports,aimed at providing a simple notification interface"""
__author__ = "[email protected]"
__license__ = "LGPL"
__version__ = "0.0.1"
__email__ = "Minos Galanakis"
__project__ = "smartpi"
__date__ = "01-06-2015"
import io
import time
import fcntl
import serial
import struct
from subprocess import Popen, PIPE
from colorlogger import CLogger
from functools import wraps
from pidaemon import start_daemon, kill_daemon, normal_start
def blinker(color, period=0.2, times=3):
    """ Decorator that allows modular output formating for PiLogger """
    def blinker_decorator(func):
        @wraps(func)
        def func_wrapper(class_obj, message):
            # Blink the LED before printing to stdout
            class_obj.blink(color, times, period)
            return func(class_obj, color, message)
        return func_wrapper
    return blinker_decorator
class PiBlinkerError(Exception):
    __module__ = 'exceptions'
class PiBlinker():
    def __init__(self):
        raise ValueError('PiBlinker is not meant to be instantiated')
    @classmethod
    def setup(self,
              log_level="ver_debug",
              log_label="PiBlinker",
              log_path=None,
              log_colors=None):
        """ Module Init."""
        # Map a color to GPIO.BCM PIN
        self.LEDS = {"RED": [17],
                     "GREEN": [18],
                     "BLUE": [27],
                     "PURPLE": [17, 27],
                     "YELLOW": [17, 18],
                     "CYAN": [18, 27],
                     "WHITE": [17, 18, 27]}
        self.last_mode = 0
        # Configure the GPIO ports in hardware
        map(self.run, [(x % v) for n in self.LEDS.values()
                       for v in n
                       for x in ["gpio export %d out",
                                 "gpio -g mode %d out"]])
        self.i2c_devices = {}
        # Associate log levels with colors
        if not log_colors:
            log_colors = {"base_color": "CYAN",
                          "info": "HBLUE",
                          "warning": "YELLOW",
                          "error": "RED",
                          "debug": "GREEN",
                          "ver_debug": "GREEN"}
        # Initialise the logging module
        CLogger.setup(log_label, log_level, log_path, log_colors)
        return self
    @staticmethod
    def run(cmd):
        """ Execute shell command in detached mdoe."""
        proc = Popen([cmd], stdout=PIPE, stderr=PIPE, shell=True)
        ret, err = proc.communicate()
        if err:
            # ignore warnings in error stream
            if "Warning" in err:
                CLogger.warning(err.strip())
                return err
            raise PiBlinkerError(err)
        else:
            return ret
    @classmethod
    def set_led(self, led, mode):
        """ Set an LED to one of the supported states."""
        if led not in self.LEDS.keys():
            return
        mlist = {"ON": 1, "OFF": 0, "Toggle": -1}
        # convert input to a numerical mode
        try:
            md = mode if mode not in mlist\
                else {k: v for k, v in mlist.iteritems()}[mode]
        except KeyError:
            raise PiBlinkerError("Mode %s is not recognised" % mode)
        # Toggle the led if required
        led_state = md if md >= 0 else (self.last_mode + 1) % 2
        # Toggle the GPIO
        map(self.run, ["gpio -g write %d %d" % (led_no, led_state) for
                       led_no in self.LEDS[led]])
        self.last_mode = led_state
    @classmethod
    def blink(self, led, times, delay=1):
        """ Blink an LED n number of times."""
        # Make sure led is uppercase
        led = led.upper()
        if led not in self.LEDS.keys():
            return
        mode = 0
        count = 1
        while (count <= times * 2):
            self.set_led(led, mode)
            time.sleep(delay)
            mode = (mode + 1) % 2
            count += 1
        self.set_led(led, mode)
    @classmethod
    def led_print(self, color, text):
        """ Print a debug message and notify the user with the LED."""
        eval("self.%s" % color.lower())(text)
    @classmethod
    def led_bcast(self, data):
        """ Broadcast a number through led brings """
        import re
        # separate the numbers in the string, i.e. 192.168.3.1 will become an array
        data = map(int, filter(lambda x: x, re.split(r'\D', data)))
        # Separate the digits to a three color tuple
        data = map(lambda x: (x/100, (x % 100)/10, (x % 10)), data)
        for red_cnt, green_cnt, blue_cnt in data:
            self.blink("GREEN", 1, 1)
            time.sleep(0.5)
            self.blink("RED", red_cnt, 0.2)
            time.sleep(0.5)
            self.blink("GREEN", green_cnt, 0.2)
            time.sleep(0.5)
            self.blink("BLUE", blue_cnt, 0.2)
            time.sleep(0.5)
            self.blink("RED", 1, 1)
    @classmethod
    @blinker("RED")
    def red(self, *args):
        """ Print a debug message and notify the user with the LED."""
        color, message = args
        print"|%s|> %s" % (color, message)
    @classmethod
    @blinker("GREEN")
    def green(self, *args):
        """ Print a debug message and notify the user with the LED."""
        color, message = args
        print"|%s|> %s" % (color, message)
    @classmethod
    @blinker("BLUE")
    def blue(self, *args):
        """ Print a debug message and notify the user with the LED."""
        color, message = args
        print"|%s|> %s" % (color, message)
    @classmethod
    @blinker("RED")
    def error(self, *args):
        """ Print a debug message and notify the user with the LED."""
        CLogger.error(args[-1])
    @classmethod
    @blinker("BLUE")
    def info(self, *args):
        """ Print a debug message and notify the user with the LED."""
        CLogger.info(args[-1])
    @classmethod
    @blinker("RED")
    def warning(self, *args):
        """ Print a debug message and notify the user with the LED."""
        CLogger.warning(args[-1])
    @classmethod
    @blinker("GREEN")
    def debug(self, *args):
        """ Print a debug message and notify the user with the LED."""
        CLogger.debug(args[-1])
    @classmethod
    def uart_open(self, port="/dev/ttyAMA0", baud=9600, time_out=None):
        """Open the Serial Channel"""
        try:
            self.uart = serial.Serial(port, baud, timeout=time_out)
        except serial.SerialException:
            print "** Failed to initialize serial, check your port.** "
            raise ValueError
    @classmethod
    def uart_activate(self):
        """ Spam UART port untill it receives an ACK """
        self.uart_open()
        countr = 0
        # Test with an unsupported command
        t_char = "O"
        while True:
            self.uart.write(t_char)
            if self.uart.inWaiting():
                repl = self.uart.read(2)
                if repl == "OK":
                    print "UART Activated"
                else:
                    print "UART was already enabled"
                break
            elif countr == 99:
                # Test with a supported command to see if activated
                t_char = "2"
            elif countr > 100:
                break
            time.sleep(0.05)
            countr += 1
    @classmethod
    def uart_read(self, target="ADC"):
        """Read the register through uart"""
        cmd = {"ADC": "2", "PIN": "1"}
        if target in cmd.keys():
            self.uart.write(cmd[target])
            return self.uart.readline()[:-1]
    @classmethod
    def uart_close(self):
        """Close the serial channel"""
        self.uart.close()
    @classmethod
    def i2c_open_file(self, slave_id, bus=1):
        """Open the I2C channel for raw byte comms"""
        if slave_id in self.i2c_devices.keys():
            print "Device %d already open" % slave_id
            return
        # Open the file descriptors
        read_ch = io.open("/dev/i2c-"+str(bus), "rb", buffering=0)
        write_ch = io.open("/dev/i2c-"+str(bus), "wb", buffering=0)
        # Set the register
        fcntl.ioctl(read_ch, 0x0703, slave_id)
        fcntl.ioctl(write_ch, 0x0703, slave_id)
        # store it to an internal dict
        self.i2c_devices[slave_id] = (read_ch, write_ch)
        # return the file descriptors if the user wants to manually drive them
        return (read_ch, write_ch)
    @classmethod
    def i2c_write_as(self, slave_id, format, data):
        """Write the data formatted using struct pack,
        Format needs to be specified"""
        try:
            wb_file = self.i2c_devices[slave_id][1]
            wb_file.write(struct.pack(format, data))
        except KeyError:
            print "Device %d does not exist" % slave_id
        except struct.error:
            print "Pack Error make sure the data fits the format structure"
        except:
            raise IOError
    @classmethod
    def i2c_read_as(self, slave_id, format, byte_no):
        try:
            rb_file = self.i2c_devices[slave_id][0]
            return struct.unpack(format, rb_file.read(byte_no))
        except KeyError:
            print "Device %d does not exit" % slave_id
        except struct.error:
            print "Pack Error make sure the data fits the format structure"
        except:
            raise IOError
    @classmethod
    def i2c_close(self, slave_id):
        """Close the file descriptors associated to the slave channel"""
        try:
            self.i2c_devices.pop(slave_id)
        except KeyError:
            print "Device %d does not exit" % slave_id
    @classmethod
    def demux(self, data):
        """ For efficiency purposes 10Bit ADC are muxed GPIO state."""
        adc_val = data & 0x3FF
        pin_val = (data >> 15)
        return (adc_val, pin_val)
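    # Illustrative word layout assumed by demux (not part of the original
    # source): in the 16-bit value, bit 15 carries the GPIO pin state and
    # bits 9..0 carry the 10-bit ADC sample, e.g.
    #
    #   demux(0x8000 | 0x3FF)  # -> (1023, 1)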
    @classmethod
    def i2c_read_adc(self, slave_id):
        """Reads data as returned from a 10Bit ADC sampling operation"""
        return self.demux(self.i2c_read_as(slave_id, '>H', 2)[0])[0]
    @classmethod
    def i2c_read_pin(self, slave_id):
        """Reads data as returned from a 10Bit ADC sampling operation"""
        return self.demux(self.i2c_read_as(slave_id, '>H', 2)[0])[1]
    @classmethod
    def test_hardware(self):
        """ Detect hardware shield's presense """
        detected = False
        try:
            self.uart_open(time_out=2)
            reading = self.uart_read("ADC")
            if len(reading):
                detected = True
        except:
            pass
        try:
            readf, writef = self.i2c_open_file(0x04, 1)
            self.i2c_read_as(04, ">H", 2)
            self.i2c_close(0x04)
            detected = True
        except:
            pass
        return detected
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--test", help="Test Hardware, select from [all,\
                    i2c, led, log, poll, uart]", dest='test')
    parser.add_argument("-a", "--activate", help="Activate UART mode\
                        after a reset", action="store_true")
    parser.add_argument("-d", "--daemon", help="Start a button monitor daemon",
                        action="store_true")
    parser.add_argument("-nd", "--nodaemon", help="Start monitor without\
                        daemon context, used in conjunction with wrappers",
                        action="store_true")
    parser.add_argument("-b1", "--button1", help="Bind script to button1",
                        dest='button1')
    parser.add_argument("-b2", "--button2", help="Bind script to button2",
                        dest='button2')
    parser.add_argument("-u", "--user", help="Select different user\
                        to run script as")
    parser.add_argument("-s", "--sudopass", help="Set optional sudo password\
                        for elevated privileges")
    parser.add_argument("-k", "--kill", help="increase output verbosity",
                        action="store_true")
    parser.add_argument("-i", "--blinkip", help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()
    mode = 0
    pb = PiBlinker.setup()
    if args.daemon or args.nodaemon:
        arguments = [args.button1, args.button2, args.user, args.sudopass]
        if args.nodaemon:
            normal_start(*arguments)
        else:
            start_daemon(*arguments)
    elif args.kill:
        kill_daemon()
    elif args.activate:
        pb.uart_activate()
    elif args.blinkip:
        pb.led_bcast(pb.run("hostname -I"))
    elif args.test:
        if args.test == "all":
            pb.red("This is important")
            pb.green("This worked")
            pb.blue("This you should know")
            readf, writef = pb.i2c_open_file(0x04, 1)
            # read two bytes using the direct file descriptor
            print "|RAW ADC|>", repr(readf.read(2))
            # read a 2-byte uint16_t variable
            print "|DEC ADC|>", pb.i2c_read_as(04, ">H", 2)[0]
            pb.i2c_close(0x04)
            pb.info("This is an info")
            pb.warning("This is a warning")
            pb.error("This is an error")
            pb.debug("This is debug")
        elif args.test == "i2c":
            readf, writef = pb.i2c_open_file(0x04, 1)
            # read two bytes using the direct file descriptor
            print "|RAW ADC|>", repr(readf.read(2))
            # read a 2-byte uint16_t variable
            print "|DEC ADC|>", pb.i2c_read_as(04, ">H", 2)[0]
            pb.i2c_close(0x04)
        elif args.test == "poll":
            readf, writef = pb.i2c_open_file(0x04, 1)
            try:
                while True:
                    # Read using read ADC
                    print "| ADC:", pb.i2c_read_adc(0x04), "| PIN: ",\
                        pb.i2c_read_pin(0x04), "|"
                    time.sleep(0.2)
            except KeyboardInterrupt:
                pass
            pb.i2c_close(0x04)
        elif args.test == "uart":
            pb.uart_open()
            print "ADC:", pb.uart_read("ADC")
            print "PIN:", pb.uart_read("PIN")
            pb.uart_close()
        elif args.test == "led":
            pb.led_print("RED", "This is RED")
            pb.led_print("GREEN", "This is GREEN")
            pb.led_print("BLUE", "This is BLUE")
        elif args.test == "log":
            pb.info("This is an info")
            pb.warning("This is a warning")
            pb.error("This is an error")
            pb.debug("This is debug")
    else:
        parser.print_help()
 | 
	lgpl-2.1 | -7,796,536,866,510,282,000 | 30.468085 | 79 | 0.529479 | false | 
| 
	jonwright/ImageD11 | 
	scripts/plotImageD11map.py | 
	1 | 
	1829 | 
	#!/usr/bin/env python
from __future__ import print_function
from ImageD11.grain import read_grain_file
import sys, os
gf = read_grain_file(sys.argv[1])
mapfile=open(sys.argv[2],"w")
def dodot(xyz,k):
    mapfile.write("%f %f %f %d\n"%(xyz[0],xyz[1],xyz[2],k))
def getmedian(s):
    items=s.split()
    j = -1
    for i in range(len(items)):
        if items[i] == "median":
            j = i
    if j == -1:
        return 0
    return abs(float(items[j+2]))
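# Example (illustrative): for an intensity_info string such as
# "sum_of_all = 500 median = 12.5", getmedian() locates the "median" token and
# returns abs(float()) of the value two tokens later, i.e. 12.5; if no
# "median" token is present it returns 0.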
            
try:
    outersf = float(sys.argv[3])
except:
    outersf = 1.0
print("Scale factor is",outersf)
for g in gf:
    #print g.translation, g.ubi
    mapfile.write("\n\n")
    o = g.translation
    try:
        sf = pow(getmedian(g.intensity_info),0.3333)*outersf
    except:
        sf = outersf
    try:
        k = int(g.npks)
    except:
        k = 1
    for u in g.ubi:
        dodot(o,k)
        dodot(o+u*sf,int(g.npks))
    for u in g.ubi:
        dodot(o,k)
        dodot(o-u*sf,int(g.npks))
#    dodot(o,k)
#    dodot(o+sf*(-g.ubi[0]-g.ubi[1]),k)
#    dodot(o,k)
#    dodot(o+sf*(g.ubi[0]+g.ubi[1]),k)
mapfile.close()
term = " "
if "linux" in sys.platform:
    term = "set term x11"
if "win32" in sys.platform:
    term = "set term windows"
    
open("gnuplot.in","w").write("""
%s
set ticslevel 0
set title "Color proportional to number of peaks"
set palette model RGB
set palette defined ( 0 "violet", 1 "blue", 2 "green", 3 "yellow", 4 "orange", 5 "red" )
set view equal xyz
set view 75,0,1,1
#set terminal gif animate delay 10 loop 1 optimize size 1024,768
set nokey
set hidden3d
#set output "ImageD11map.gif"
splot "%s" u 1:2:3:4 w l lw 2 lc pal z
"""%(term, sys.argv[2])
# "".join(["set view 75,%d\n replot\n"%(i) for i in range(1,360,1)])
                             )
    
os.system("gnuplot -background white gnuplot.in -")
    
 | 
	gpl-2.0 | -5,133,550,507,565,548,000 | 21.304878 | 88 | 0.574631 | false | 
| 
	inspyration/odoo | 
	addons/email_template/tests/test_mail.py | 
	124 | 
	14318 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
    def setUp(self):
        super(test_message_compose, self).setUp()
        # create 'pigs' and 'bird' groups that will be used throughout the various tests
        self.group_bird_id = self.mail_group.create(self.cr, self.uid,
            {'name': 'Bird', 'description': 'I am angry !'})
    def test_00_message_compose_wizard(self):
        """ Tests designed for the mail.compose.message wizard updated by email_template. """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')
        self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': '[email protected]'})
        user_admin = self.res_users.browse(cr, uid, uid)
        p_a_id = user_admin.partner_id.id
        group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)
        # Mail data
        _subject1 = 'Pigs'
        _subject2 = 'Bird'
        _body_html1 = 'Fans of Pigs, unite !'
        _body_html2 = 'I am angry !'
        _attachments = [
            {'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
            {'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
            ]
        _attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
        # Create template on mail.group, with attachments
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': False,
            'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
            'email_to': '[email protected], [email protected]',
            'email_cc': '[email protected]'
            })
        # ----------------------------------------
        # CASE1: comment and save as template
        # ----------------------------------------
        # 1. Comment on pigs
        compose_id = mail_compose.create(cr, uid,
            {'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
            {'default_composition_mode': 'comment',
                'default_model': 'mail.group',
                'default_res_id': self.group_pigs_id,
                'active_ids': [self.group_pigs_id, self.group_bird_id]})
        compose = mail_compose.browse(cr, uid, compose_id)
        # 2. Save current composition form as a template
        mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
        # Test: email_template subject, body_html, model
        last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
        self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
        last_template = email_template.browse(cr, uid, last_template_id)
        self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
        # ----------------------------------------
        # CASE2: comment with template, save as template
        # ----------------------------------------
        # 1. Comment on pigs
        context = {
            'default_composition_mode': 'comment',
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_use_template': False,
            'default_template_id': email_template_id,
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()
        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['[email protected]', '[email protected]', '[email protected]'])])
        # Test: mail.compose.message: subject, body, partner_ids
        self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
        self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
        # Test: mail.compose.message: attachments (owner has not been modified)
        for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
            self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                'mail.message attachment name / data incorrect')
        # Test: mail.message: attachments
        mail_compose.send_mail(cr, uid, [compose_id])
        group_pigs.refresh()
        message_pigs = group_pigs.message_ids[0]
        for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
            self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                'mail.message attachment name / data incorrect')
        # ----------------------------------------
        # CASE3: mass_mail with template
        # ----------------------------------------
        # 1. Mass_mail on pigs and bird, with default_partner_ids set to check that it is correctly added
        context = {
            'default_composition_mode': 'mass_mail',
            'default_notify': True,
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_template_id': email_template_id,
            'default_partner_ids': [p_a_id],
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()
        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = [p_a_id]
        self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
        self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect')  # todo: check signature
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
        # 2. Post the comment, get created message
        mail_compose.send_mail(cr, uid, [compose_id],  {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
        group_pigs.refresh()
        group_bird.refresh()
        message_pigs = group_pigs.message_ids[0]
        message_bird = group_bird.message_ids[0]
        # Test: subject, body
        self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
        self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
        self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
        self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
        # Test: partner_ids: p_a_id (default) + 3 newly created partners
        # message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
        # message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
        # partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['[email protected]', '[email protected]', '[email protected]'])])
        # partner_ids.append(p_a_id)
        # self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
        # self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')
        # ----------------------------------------
        # CASE4: test newly introduced partner_to field
        # ----------------------------------------
        # get already-created partners back
        p_b_id = self.res_partner.search(cr, uid, [('email', '=', '[email protected]')])[0]
        p_c_id = self.res_partner.search(cr, uid, [('email', '=', '[email protected]')])[0]
        p_d_id = self.res_partner.search(cr, uid, [('email', '=', '[email protected]')])[0]
        # modify template: use partner_to, use template and email address in email_to to test all features together
        user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
        email_template.write(cr, uid, [email_template_id], {
            'model_id': user_model_id,
            'body_html': '${object.login}',
            'email_to': '${object.email}, c@c',
            'partner_to': '%i,%i' % (p_b_id, p_c_id),
            'email_cc': 'd@d',
            })
        # partner by email + partner by id (no duplicates)
        send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
        # Generate message with default email and partner on template
        mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
        self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')
    @mute_logger('openerp.models')
    def test_10_email_templating(self):
        """ Tests designed for the mail.compose.message wizard updated by email_template. """
        cr, uid, context = self.cr, self.uid, {}
        # create the email.template on mail.group model
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'email_from': 'Raoul Grosbedon <[email protected]>',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': True,
            'email_to': '[email protected], [email protected]',
            'email_cc': '[email protected]',
            'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
        })
        # not force send: email_recipients is not taken into account
        msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
        mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
        self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
        self.assertEqual(mail.email_to, '[email protected], [email protected]', 'email_template: send_mail: wrong email_to')
        self.assertEqual(mail.email_cc, '[email protected]', 'email_template: send_mail: wrong email_cc')
        self.assertEqual(
            set([partner.id for partner in mail.recipient_ids]),
            set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
            'email_template: send_mail: wrong management of partner_to')
        # force send: take email_recipients into account
        email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
        sent_emails = self._build_email_kwargs_list
        email_to_lst = [
            ['[email protected]', '[email protected]'], ['Administrator <[email protected]>'],
            ['Raoul Grosbedon <[email protected]>'], ['Bert Tartignole <[email protected]>']]
        self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
        for email in sent_emails:
            self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
 | 
	agpl-3.0 | -2,380,559,610,332,477,000 | 57.680328 | 172 | 0.60155 | false | 
| 
	eicher31/compassion-modules | 
	child_compassion/mappings/household_mapping.py | 
	3 | 
	3536 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <[email protected]>
#
#    The licence is in the file __manifest__.py
#
##############################################################################
from odoo.addons.message_center_compassion.mappings.base_mapping import \
    OnrampMapping
class HouseHoldMapping(OnrampMapping):
    ODOO_MODEL = 'compassion.household'
    CONNECT_MAPPING = {
        "BeneficiaryHouseholdMemberList": ('member_ids',
                                           'compassion.household.member'),
        "BeneficiaryHouseholdMemberDetails": ('member_ids',
                                              'compassion.household.member'),
        "FemaleGuardianEmploymentStatus": 'female_guardian_job_type',
        "FemaleGuardianOccupation": 'female_guardian_job',
        "Household_ID": "household_id",
        "Household_Name": "name",
        "IsNaturalFatherLivingWithChild": 'father_living_with_child',
        "IsNaturalMotherLivingWithChild": 'mother_living_with_child',
        "MaleGuardianEmploymentStatus": 'male_guardian_job_type',
        "MaleGuardianOccupation": "male_guardian_job",
        "NaturalFatherAlive": "father_alive",
        "NaturalMotherAlive": "mother_alive",
        "NumberOfSiblingBeneficiaries": "number_beneficiaries",
        "ParentsMaritalStatus": "marital_status",
        "ParentsTogether": "parents_together",
        'RevisedValues': 'revised_value_ids',
        # Not defined
        "SourceKitName": None,
    }
    def _process_odoo_data(self, odoo_data):
        # Unlink old revised values and create new ones
        if isinstance(odoo_data.get('revised_value_ids'), list):
            household = self.env[self.ODOO_MODEL].search(
                [('household_id', '=', odoo_data['household_id'])])
            household.revised_value_ids.unlink()
            for value in odoo_data['revised_value_ids']:
                self.env['compassion.major.revision'].create({
                    'name': value,
                    'household_id': household.id,
                })
            del odoo_data['revised_value_ids']
        # Replace dict by a tuple for the ORM update/create
        if 'member_ids' in odoo_data:
            # Remove all members
            household = self.env[self.ODOO_MODEL].search(
                [('household_id', '=', odoo_data['household_id'])])
            household.member_ids.unlink()
            member_list = list()
            for member in odoo_data['member_ids']:
                orm_tuple = (0, 0, member)
                member_list.append(orm_tuple)
            odoo_data['member_ids'] = member_list or False
        for key in odoo_data.iterkeys():
            val = odoo_data[key]
            if isinstance(val, basestring) and val.lower() in (
                    'null', 'false', 'none', 'other', 'unknown'):
                odoo_data[key] = False
class HouseholdMemberMapping(OnrampMapping):
    ODOO_MODEL = 'compassion.household.member'
    CONNECT_MAPPING = {
        "Beneficiary_GlobalID": ('child_id.global_id', 'compassion.child'),
        "Beneficiary_LocalID": 'beneficiary_local_id',
        "FullName": None,
        "HouseholdMemberRole": 'role',
        "HouseholdMember_Name": 'name',
        "IsCaregiver": 'is_caregiver',
        "IsPrimaryCaregiver": 'is_primary_caregiver',
    }
 | 
	agpl-3.0 | 8,306,735,830,094,433,000 | 40.116279 | 78 | 0.565328 | false | 
| 
	go-bears/nupic | 
	src/nupic/encoders/category.py | 
	39 | 
	7798 | 
	# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.data.fieldmeta import FieldMetaType
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import Encoder, EncoderResult
from nupic.encoders.scalar import ScalarEncoder
UNKNOWN = "<UNKNOWN>"
class CategoryEncoder(Encoder):
  """Encodes a list of discrete categories (described by strings), that aren't
  related to each other, so we never emit a mixture of categories.
  The value of zero is reserved for "unknown category"
  Internally we use a ScalarEncoder with a radius of 1, but since we only encode
  integers, we never get mixture outputs.
  The SDRCategoryEncoder uses a different method to encode categories"""
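  # A minimal usage sketch (category names and parameter values below are
  # hypothetical, not taken from this module):
  #
  #   encoder = CategoryEncoder(w=3, categoryList=["cat", "dog", "bird"], forced=True)
  #   output = numpy.zeros(encoder.getWidth(), dtype=numpy.uint8)
  #   encoder.encodeIntoArray("dog", output)    # sets the w bits of the "dog" bucket
  #   encoder.topDownCompute(output)            # recovers an EncoderResult for "dog"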
  def __init__(self, w, categoryList, name="category", verbosity=0, forced=False):
    """params:
       forced (default False) : if True, skip checks for parameters' settings; see encoders/scalar.py for details
    """
    self.encoders = None
    self.verbosity = verbosity
    # number of categories includes "unknown"
    self.ncategories = len(categoryList) + 1
    self.categoryToIndex = dict()
    self.indexToCategory = dict()
    self.indexToCategory[0] = UNKNOWN
    for i in xrange(len(categoryList)):
      self.categoryToIndex[categoryList[i]] = i+1
      self.indexToCategory[i+1] = categoryList[i]
    self.encoder = ScalarEncoder(w, minval=0, maxval=self.ncategories - 1,
                      radius=1, periodic=False, forced=forced)
    self.width = w * self.ncategories
    assert self.encoder.getWidth() == self.width
    self.description = [(name, 0)]
    self.name = name
    # These are used to support the topDownCompute method
    self._topDownMappingM = None
    # This gets filled in by getBucketValues
    self._bucketValues = None
  def getDecoderOutputFieldTypes(self):
    """ [Encoder class virtual method override]
    """
    # TODO: change back to string meta-type after the decoding logic is fixed
    #       to output strings instead of internal index values.
    #return (FieldMetaType.string,)
    return (FieldMetaType.integer,)
  def getWidth(self):
    return self.width
  def getDescription(self):
    return self.description
  def getScalars(self, input):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return numpy.array([None])
    else:
      return numpy.array([self.categoryToIndex.get(input, 0)])
  def getBucketIndices(self, input):
    """ See method description in base.py """
    # Get the bucket index from the underlying scalar encoder
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))
  def encodeIntoArray(self, input, output):
    # if not found, we encode category 0
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      output[0:] = 0
      val = "<missing>"
    else:
      val = self.categoryToIndex.get(input, 0)
      self.encoder.encodeIntoArray(val, output)
    if self.verbosity >= 2:
      print "input:", input, "va:", val, "output:", output
      print "decoded:", self.decodedToStr(self.decode(output))
  def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py
    """
    # Get the scalar values from the underlying scalar encoder
    (fieldsDict, fieldNames) = self.encoder.decode(encoded)
    if len(fieldsDict) == 0:
      return (fieldsDict, fieldNames)
    # Expect only 1 field
    assert(len(fieldsDict) == 1)
    # Get the list of categories the scalar values correspond to and
    #  generate the description from the category name(s).
    (inRanges, inDesc) = fieldsDict.values()[0]
    outRanges = []
    desc = ""
    for (minV, maxV) in inRanges:
      minV = int(round(minV))
      maxV = int(round(maxV))
      outRanges.append((minV, maxV))
      while minV <= maxV:
        if len(desc) > 0:
          desc += ", "
        desc += self.indexToCategory[minV]
        minV += 1
    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (outRanges, desc)}, [fieldName])
  def closenessScores(self, expValues, actValues, fractional=True,):
    """ See the function description in base.py
    kwargs will have the keyword "fractional", which is ignored by this encoder
    """
    expValue = expValues[0]
    actValue = actValues[0]
    if expValue == actValue:
      closeness = 1.0
    else:
      closeness = 0.0
    if not fractional:
      closeness = 1.0 - closeness
    return numpy.array([closeness])
  def getBucketValues(self):
    """ See the function description in base.py """
    if self._bucketValues is None:
      numBuckets = len(self.encoder.getBucketValues())
      self._bucketValues = []
      for bucketIndex in range(numBuckets):
        self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)
    return self._bucketValues
  def getBucketInfo(self, buckets):
    """ See the function description in base.py
    """
    # For the category encoder, the bucket index is the category index
    bucketInfo = self.encoder.getBucketInfo(buckets)[0]
    categoryIndex = int(round(bucketInfo.value))
    category = self.indexToCategory[categoryIndex]
    return [EncoderResult(value=category, scalar=categoryIndex,
                         encoding=bucketInfo.encoding)]
  def topDownCompute(self, encoded):
    """ See the function description in base.py
    """
    encoderResult = self.encoder.topDownCompute(encoded)[0]
    value = encoderResult.value
    categoryIndex = int(round(value))
    category = self.indexToCategory[categoryIndex]
    return EncoderResult(value=category, scalar=categoryIndex,
                         encoding=encoderResult.encoding)
  @classmethod
  def read(cls, proto):
    encoder = object.__new__(cls)
    encoder.verbosity = proto.verbosity
    encoder.encoder = ScalarEncoder.read(proto.encoder)
    encoder.width = proto.width
    encoder.description = [(proto.name, 0)]
    encoder.name = proto.name
    encoder.indexToCategory = {x.index: x.category
                               for x in proto.indexToCategory}
    encoder.categoryToIndex = {category: index
                               for index, category
                               in encoder.indexToCategory.items()
                               if category != UNKNOWN}
    encoder._topDownMappingM = None
    encoder._bucketValues = None
    return encoder
  def write(self, proto):
    proto.width = self.width
    proto.indexToCategory = [
      {"index": index, "category": category}
      for index, category in self.indexToCategory.items()
    ]
    proto.name = self.name
    proto.verbosity = self.verbosity
    self.encoder.write(proto.encoder)
 | 
	agpl-3.0 | -742,178,916,835,247,100 | 29.944444 | 113 | 0.662349 | false | 
| 
	dvliman/jaikuengine | 
	.google_appengine/lib/django-1.5/tests/regressiontests/extra_regress/models.py | 
	114 | 
	1365 | 
	from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
    base = models.ForeignKey('self', null=True)
    title = models.CharField(blank=True, max_length=255)
    when = models.DateTimeField(default=datetime.datetime.now)
    def __str__(self):
        return "%s (%s, %s)" % (self.title, self.id, self.base.id)
    def save(self, *args, **kwargs):
        super(RevisionableModel, self).save(*args, **kwargs)
        if not self.base:
            self.base = self
            kwargs.pop('force_insert', None)
            kwargs.pop('force_update', None)
            super(RevisionableModel, self).save(*args, **kwargs)
    def new_revision(self):
        new_revision = copy.copy(self)
        new_revision.pk = None
        return new_revision
class Order(models.Model):
    created_by = models.ForeignKey(User)
    text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
    first = models.CharField(max_length=20)
    second = models.CharField(max_length=20)
    third = models.CharField(max_length=20)
    def __str__(self):
        return 'TestObject: %s,%s,%s' % (self.first,self.second,self.third)
 | 
	apache-2.0 | -6,502,126,748,451,387,000 | 29.333333 | 75 | 0.667399 | false | 
| 
	goFrendiAsgard/kokoropy | 
	kokoropy/packages/sqlalchemy/dialects/mysql/pyodbc.py | 
	32 | 
	2640 | 
	# mysql/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pyodbc
    :name: PyODBC
    :dbapi: pyodbc
    :connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
    :url: http://pypi.python.org/pypi/pyodbc/
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25).   Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    supports_unicode_statements = False
    execution_ctx_cls = MySQLExecutionContext_pyodbc
    pyodbc_driver_name = "MySQL"
    def __init__(self, **kw):
        # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
        kw.setdefault('convert_unicode', True)
        super(MySQLDialect_pyodbc, self).__init__(**kw)
    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]
        util.warn("Could not detect the connection character set.  "
                  "Assuming latin1.")
        return 'latin1'
    def _extract_error_code(self, exception):
        # Guard against error strings that contain no numeric code, in which
        # case re.search() returns None
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        if m:
            return int(m.group(1))
        else:
            return None
dialect = MySQLDialect_pyodbc
 | 
	mit | -6,467,777,041,147,659,000 | 31.195122 | 77 | 0.660606 | false | 
| 
	chienlieu2017/it_management | 
	odoo/addons/website_event/controllers/main.py | 
	7 | 
	11571 | 
	# -*- coding: utf-8 -*-
import babel.dates
import re
import werkzeug
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from odoo import fields, http, _
from odoo.addons.website.models.website import slug
from odoo.http import request
class WebsiteEventController(http.Controller):
    @http.route(['/event', '/event/page/<int:page>', '/events', '/events/page/<int:page>'], type='http', auth="public", website=True)
    def events(self, page=1, **searches):
        Event = request.env['event.event']
        EventType = request.env['event.type']
        searches.setdefault('date', 'all')
        searches.setdefault('type', 'all')
        searches.setdefault('country', 'all')
        domain_search = {}
        def sdn(date):
            return fields.Datetime.to_string(date.replace(hour=23, minute=59, second=59))
        def sd(date):
            return fields.Datetime.to_string(date)
        today = datetime.today()
        dates = [
            ['all', _('Next Events'), [("date_end", ">", sd(today))], 0],
            ['today', _('Today'), [
                ("date_end", ">", sd(today)),
                ("date_begin", "<", sdn(today))],
                0],
            ['week', _('This Week'), [
                ("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))),
                ("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))],
                0],
            ['nextweek', _('Next Week'), [
                ("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))),
                ("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))],
                0],
            ['month', _('This month'), [
                ("date_end", ">=", sd(today.replace(day=1))),
                ("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],
                0],
            ['nextmonth', _('Next month'), [
                ("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))),
                ("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],
                0],
            ['old', _('Old Events'), [
                ("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))],
                0],
        ]
        # search domains
        # TDE note: WTF ???
        current_date = None
        current_type = None
        current_country = None
        for date in dates:
            if searches["date"] == date[0]:
                domain_search["date"] = date[2]
                if date[0] != 'all':
                    current_date = date[1]
        if searches["type"] != 'all':
            current_type = EventType.browse(int(searches['type']))
            domain_search["type"] = [("event_type_id", "=", int(searches["type"]))]
        if searches["country"] != 'all' and searches["country"] != 'online':
            current_country = request.env['res.country'].browse(int(searches['country']))
            domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)]
        elif searches["country"] == 'online':
            domain_search["country"] = [("country_id", "=", False)]
        def dom_without(without):
            domain = [('state', "in", ['draft', 'confirm', 'done'])]
            for key, search in domain_search.items():
                if key != without:
                    domain += search
            return domain
        # count by domains without self search
        for date in dates:
            if date[0] != 'old':
                date[3] = Event.search_count(dom_without('date') + date[2])
        domain = dom_without('type')
        types = Event.read_group(domain, ["id", "event_type_id"], groupby=["event_type_id"], orderby="event_type_id")
        types.insert(0, {
            'event_type_id_count': sum([int(type['event_type_id_count']) for type in types]),
            'event_type_id': ("all", _("All Categories"))
        })
        domain = dom_without('country')
        countries = Event.read_group(domain, ["id", "country_id"], groupby="country_id", orderby="country_id")
        countries.insert(0, {
            'country_id_count': sum([int(country['country_id_count']) for country in countries]),
            'country_id': ("all", _("All Countries"))
        })
        step = 10  # Number of events per page
        event_count = Event.search_count(dom_without("none"))
        pager = request.website.pager(
            url="/event",
            url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')},
            total=event_count,
            page=page,
            step=step,
            scope=5)
        order = 'website_published desc, date_begin'
        if searches.get('date', 'all') == 'old':
            order = 'website_published desc, date_begin desc'
        events = Event.search(dom_without("none"), limit=step, offset=pager['offset'], order=order)
        values = {
            'current_date': current_date,
            'current_country': current_country,
            'current_type': current_type,
            'event_ids': events,  # event_ids used in website_event_track so we keep name as it is
            'dates': dates,
            'types': types,
            'countries': countries,
            'pager': pager,
            'searches': searches,
            'search_path': "?%s" % werkzeug.url_encode(searches),
        }
        return request.render("website_event.index", values)
    @http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", website=True)
    def event_page(self, event, page, **post):
        values = {
            'event': event,
            'main_object': event
        }
        if '.' not in page:
            page = 'website_event.%s' % page
        try:
            request.website.get_template(page)
        except ValueError:
            # page not found
            values['path'] = re.sub(r"^website_event\.", '', page)
            values['from_template'] = 'website_event.default_page'  # .strip('website_event.')
            page = 'website.page_404'
        return request.render(page, values)
    @http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True)
    def event(self, event, **post):
        if event.menu_id and event.menu_id.child_id:
            target_url = event.menu_id.child_id[0].url
        else:
            target_url = '/event/%s/register' % str(event.id)
        if post.get('enable_editor') == '1':
            target_url += '?enable_editor=1'
        return request.redirect(target_url)
    @http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
    def event_register(self, event, **post):
        values = {
            'event': event,
            'main_object': event,
            'range': range,
        }
        return request.render("website_event.event_description_full", values)
    @http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True)
    def add_event(self, event_name="New Event", **kwargs):
        event = self._add_event(event_name, request.context)
        return request.redirect("/event/%s/register?enable_editor=1" % slug(event))
    def _add_event(self, event_name=None, context=None, **kwargs):
        if not event_name:
            event_name = _("New Event")
        date_begin = datetime.today() + timedelta(days=(14))
        vals = {
            'name': event_name,
            'date_begin': fields.Date.to_string(date_begin),
            'date_end': fields.Date.to_string((date_begin + timedelta(days=(1)))),
            'seats_available': 1000,
        }
        return request.env['event.event'].with_context(context or {}).create(vals)
    def get_formated_date(self, event):
        start_date = fields.Datetime.from_string(event.date_begin).date()
        end_date = fields.Datetime.from_string(event.date_end).date()
        month = babel.dates.get_month_names('abbreviated', locale=event.env.context.get('lang', 'en_US'))[start_date.month]
        return ('%s %s%s') % (month, start_date.strftime("%e"), (end_date != start_date and ("-" + end_date.strftime("%e")) or ""))
    @http.route('/event/get_country_event_list', type='http', auth='public', website=True)
    def get_country_events(self, **post):
        Event = request.env['event.event']
        country_code = request.session['geoip'].get('country_code')
        result = {'events': [], 'country': False}
        events = None
        if country_code:
            country = request.env['res.country'].search([('code', '=', country_code)], limit=1)
            events = Event.search(['|', ('address_id', '=', None), ('country_id.code', '=', country_code), ('date_begin', '>=', '%s 00:00:00' % fields.Date.today()), ('state', '=', 'confirm')], order="date_begin")
        if not events:
            events = Event.search([('date_begin', '>=', '%s 00:00:00' % fields.Date.today()), ('state', '=', 'confirm')], order="date_begin")
        for event in events:
            if country_code and event.country_id.code == country_code:
                result['country'] = country
            result['events'].append({
                "date": self.get_formated_date(event),
                "event": event,
                "url": event.website_url})
        return request.render("website_event.country_events_list", result)
    def _process_tickets_details(self, data):
        nb_register = int(data.get('nb_register-0', 0))
        if nb_register:
            return [{'id': 0, 'name': 'Registration', 'quantity': nb_register, 'price': 0}]
        return []
    @http.route(['/event/<model("event.event"):event>/registration/new'], type='json', auth="public", methods=['POST'], website=True)
    def registration_new(self, event, **post):
        tickets = self._process_tickets_details(post)
        if not tickets:
            return request.redirect("/event/%s" % slug(event))
        return request.env['ir.ui.view'].render_template("website_event.registration_attendee_details", {'tickets': tickets, 'event': event})
    def _process_registration_details(self, details):
        ''' Process data posted from the attendee details form. '''
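        # Illustrative shape of the posted data handled below (values are
        # hypothetical): keys are '<counter>-<field_name>', and counter '0'
        # carries values shared by every attendee, e.g.
        #   {'0-event_id': '3', '1-name': 'Jane', '1-email': 'jane@example.com'}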
        registrations = {}
        global_values = {}
        for key, value in details.iteritems():
            counter, field_name = key.split('-', 1)
            if counter == '0':
                global_values[field_name] = value
            else:
                registrations.setdefault(counter, dict())[field_name] = value
        for key, value in global_values.iteritems():
            for registration in registrations.values():
                registration[key] = value
        return registrations.values()
    @http.route(['/event/<model("event.event"):event>/registration/confirm'], type='http', auth="public", methods=['POST'], website=True)
    def registration_confirm(self, event, **post):
        Attendees = request.env['event.registration']
        registrations = self._process_registration_details(post)
        for registration in registrations:
            registration['event_id'] = event
            Attendees += Attendees.sudo().create(
                Attendees._prepare_attendee_values(registration))
        return request.render("website_event.registration_complete", {
            'attendees': Attendees,
            'event': event,
        })
 | 
	gpl-3.0 | 8,617,715,063,772,964,000 | 43.675676 | 213 | 0.551465 | false | 
| 
	derekjchow/models | 
	research/deeplab/core/nas_cell.py | 
	1 | 
	8432 | 
	# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cell structure used by NAS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from deeplab.core.utils import resize_bilinear
from deeplab.core.utils import scale_dimension
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
class NASBaseCell(object):
  """NASNet Cell class that is used as a 'layer' in image architectures.
  See https://arxiv.org/abs/1707.07012 and https://arxiv.org/abs/1712.00559.
  Args:
    num_conv_filters: The number of filters for each convolution operation.
    operations: List of operations that are performed in the NASNet Cell in
      order.
    used_hiddenstates: Binary array that signals if the hiddenstate was used
      within the cell. This is used to determine what outputs of the cell
      should be concatenated together.
    hiddenstate_indices: Determines what hiddenstates should be combined
      together with the specified operations to create the NASNet cell.
  """
  def __init__(self, num_conv_filters, operations, used_hiddenstates,
               hiddenstate_indices, drop_path_keep_prob, total_num_cells,
               total_training_steps):
    if len(hiddenstate_indices) != len(operations):
      raise ValueError(
          'Number of hiddenstate_indices and operations should be the same.')
    if len(operations) % 2:
      raise ValueError('Number of operations should be even.')
    self._num_conv_filters = num_conv_filters
    self._operations = operations
    self._used_hiddenstates = used_hiddenstates
    self._hiddenstate_indices = hiddenstate_indices
    self._drop_path_keep_prob = drop_path_keep_prob
    self._total_num_cells = total_num_cells
    self._total_training_steps = total_training_steps
  def __call__(self, net, scope, filter_scaling, stride, prev_layer, cell_num):
    """Runs the conv cell."""
    self._cell_num = cell_num
    self._filter_scaling = filter_scaling
    self._filter_size = int(self._num_conv_filters * filter_scaling)
    with tf.variable_scope(scope):
      net = self._cell_base(net, prev_layer)
      for i in range(len(self._operations) // 2):
        with tf.variable_scope('comb_iter_{}'.format(i)):
          h1 = net[self._hiddenstate_indices[i * 2]]
          h2 = net[self._hiddenstate_indices[i * 2 + 1]]
          with tf.variable_scope('left'):
            h1 = self._apply_conv_operation(
                h1, self._operations[i * 2], stride,
                self._hiddenstate_indices[i * 2] < 2)
          with tf.variable_scope('right'):
            h2 = self._apply_conv_operation(
                h2, self._operations[i * 2 + 1], stride,
                self._hiddenstate_indices[i * 2 + 1] < 2)
          with tf.variable_scope('combine'):
            h = h1 + h2
          net.append(h)
      with tf.variable_scope('cell_output'):
        net = self._combine_unused_states(net)
      return net
  def _cell_base(self, net, prev_layer):
    """Runs the beginning of the conv cell before the chosen ops are run."""
    filter_size = self._filter_size
    if prev_layer is None:
      prev_layer = net
    else:
      if net.shape[2] != prev_layer.shape[2]:
        prev_layer = resize_bilinear(
            prev_layer, tf.shape(net)[1:3], prev_layer.dtype)
      if filter_size != prev_layer.shape[3]:
        prev_layer = tf.nn.relu(prev_layer)
        prev_layer = slim.conv2d(prev_layer, filter_size, 1, scope='prev_1x1')
        prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
    net = tf.nn.relu(net)
    net = slim.conv2d(net, filter_size, 1, scope='1x1')
    net = slim.batch_norm(net, scope='beginning_bn')
    net = tf.split(axis=3, num_or_size_splits=1, value=net)
    net.append(prev_layer)
    return net
  def _apply_conv_operation(self, net, operation, stride,
                            is_from_original_input):
    """Applies the predicted conv operation to net."""
    if stride > 1 and not is_from_original_input:
      stride = 1
    input_filters = net.shape[3]
    filter_size = self._filter_size
    if 'separable' in operation:
      num_layers = int(operation.split('_')[-1])
      kernel_size = int(operation.split('x')[0][-1])
      for layer_num in range(num_layers):
        net = tf.nn.relu(net)
        net = slim.separable_conv2d(
            net,
            filter_size,
            kernel_size,
            depth_multiplier=1,
            scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
            stride=stride)
        net = slim.batch_norm(
            net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
        stride = 1
    elif 'atrous' in operation:
      kernel_size = int(operation.split('x')[0][-1])
      net = tf.nn.relu(net)
      if stride == 2:
        scaled_height = scale_dimension(tf.shape(net)[1], 0.5)
        scaled_width = scale_dimension(tf.shape(net)[2], 0.5)
        net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype)
        net = slim.conv2d(net, filter_size, kernel_size, rate=1,
                          scope='atrous_{0}x{0}'.format(kernel_size))
      else:
        net = slim.conv2d(net, filter_size, kernel_size, rate=2,
                          scope='atrous_{0}x{0}'.format(kernel_size))
      net = slim.batch_norm(net, scope='bn_atr_{0}x{0}'.format(kernel_size))
    elif operation in ['none']:
      if stride > 1 or (input_filters != filter_size):
        net = tf.nn.relu(net)
        net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
        net = slim.batch_norm(net, scope='bn_1')
    elif 'pool' in operation:
      pooling_type = operation.split('_')[0]
      pooling_shape = int(operation.split('_')[-1].split('x')[0])
      if pooling_type == 'avg':
        net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding='SAME')
      elif pooling_type == 'max':
        net = slim.max_pool2d(net, pooling_shape, stride=stride, padding='SAME')
      else:
        raise ValueError('Unimplemented pooling type: ', pooling_type)
      if input_filters != filter_size:
        net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
        net = slim.batch_norm(net, scope='bn_1')
    else:
      raise ValueError('Unimplemented operation', operation)
    if operation != 'none':
      net = self._apply_drop_path(net)
    return net
  def _combine_unused_states(self, net):
    """Concatenates the unused hidden states of the cell."""
    used_hiddenstates = self._used_hiddenstates
    states_to_combine = ([
        h for h, is_used in zip(net, used_hiddenstates) if not is_used])
    net = tf.concat(values=states_to_combine, axis=3)
    return net
  @tf.contrib.framework.add_arg_scope
  def _apply_drop_path(self, net):
    """Apply drop_path regularization."""
    drop_path_keep_prob = self._drop_path_keep_prob
    if drop_path_keep_prob < 1.0:
      # Scale keep prob by layer number.
      assert self._cell_num != -1
      layer_ratio = (self._cell_num + 1) / float(self._total_num_cells)
      drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
      # Decrease keep prob over time.
      current_step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
      current_ratio = tf.minimum(1.0, current_step / self._total_training_steps)
      drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
      # Drop path.
      noise_shape = [tf.shape(net)[0], 1, 1, 1]
      random_tensor = drop_path_keep_prob
      random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
      binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
      keep_prob_inv = tf.cast(1.0 / drop_path_keep_prob, net.dtype)
      net = net * keep_prob_inv * binary_tensor
    return net
 | 
	apache-2.0 | 4,058,546,181,967,095,300 | 41.371859 | 80 | 0.629032 | false | 
| 
	lancezlin/ml_template_py | 
	lib/python2.7/site-packages/sklearn/metrics/tests/test_score_objects.py | 
	15 | 
	17443 | 
	import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
                             log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
                                    _passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
                      'neg_mean_squared_error', 'neg_median_absolute_error',
                      'mean_absolute_error',
                      'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'neg_log_loss', 'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
    # Make estimators that make sense to test various scoring methods
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    return dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS] +
        [(name, sensible_clf) for name in CLF_SCORERS] +
        [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
    # Create some memory mapped data
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
    X, y = make_classification(n_samples=30, n_features=5, random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
    joblib.dump((X, y, y_ml), filename)
    X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
    ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    # GC closes the mmap file descriptors
    X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
    shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
    """Dummy estimator to test check_scoring"""
    pass
class EstimatorWithFit(BaseEstimator):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
class EstimatorWithFitAndScore(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
    def score(self, X, y):
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        self.y = y
        return self
    def predict(self, X):
        return self.y
class DummyScorer(object):
    """Dummy scorer that always returns 1."""
    def __call__(self, est, X, y):
        return 1
def test_all_scorers_repr():
    # Test that all scorers have a working repr
    for name, scorer in SCORERS.items():
        repr(scorer)
def test_check_scoring():
    # Test all branches of check_scoring
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorers work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifiers
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)
    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
    assert_almost_equal(score1, score2)
    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)
    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0
    # get sensible estimators for each metric
    estimator = _make_estimators(X_train, y_train, y_ml_train)
    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
@ignore_warnings  # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
    scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
    if scorer_name in MULTILABEL_ONLY_SCORERS:
        score = scorer(estimator, X_mm, y_ml_mm)
    else:
        score = scorer(estimator, X_mm, y_mm)
    assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    for name in SCORERS.keys():
        yield check_scorer_memmap, name
def test_deprecated_names():
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    for name in ('mean_absolute_error', 'mean_squared_error',
                 'median_absolute_error', 'log_loss'):
        warning_msg = "Scoring method %s was renamed to" % name
        for scorer in (get_scorer(name), SCORERS[name]):
            assert_warns_message(DeprecationWarning,
                                 warning_msg,
                                 scorer, clf, X, y)
        assert_warns_message(DeprecationWarning,
                             warning_msg,
                             cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), f1_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), roc_auc_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         Ridge(), r2_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         KMeans(), adjusted_rand_score)
 | 
	mit | -8,321,461,546,816,175,000 | 37.935268 | 79 | 0.64513 | false | 
| 
	14thibea/megamix | 
	megamix/batch/DPGMM.py | 
	1 | 
	20644 | 
	# -*- coding: utf-8 -*-
#
#Created on Fri Apr 14 15:21:17 2017
#
#author: Elina Thibeau-Sutre
#
from .initializations import initialize_log_assignements,initialize_mcw
from .base import _full_covariance_matrices
from .base import _log_normal_matrix
from .base import BaseMixture
from .base import _log_B
import numpy as np
from scipy.special import psi,betaln
from scipy.misc import logsumexp
class DPVariationalGaussianMixture(BaseMixture):
    """
    Variational Bayesian Estimation of a Gaussian Mixture with Dirichlet Process
    
    This class infers an approximate posterior distribution over the
    parameters of a Gaussian mixture distribution.
    
    The weights distribution follows a Dirichlet Process with attribute alpha.
    
    Parameters
    ----------
    
    n_components : int, defaults to 1.
        Number of clusters used.
    
    init : str, defaults to 'kmeans'.
        Method used to perform the initialization,
        must be in ['random', 'random_sk', 'plus', 'AF_KMC', 'kmeans', 'GMM', 'VBGMM'].
    reg_covar : float, defaults to 1e-6
        In order to avoid null covariances this float is added to the diagonal
        of covariance matrices.
    
    type_init : str, defaults to 'resp'.        
        The algorithm is initialized using this data (responsibilities if 'resp'
        or means, covariances and weights if 'mcw').
    Other parameters
    ----------------                
    
    alpha_0 : float, Optional | defaults to None.
        The prior parameter on the weight distribution (Beta).
        A high value of alpha_0 will lead to equal weights, while a low value
        will allow some clusters to shrink and disappear. Must be greater than 0.
    
        If None, the value is set to 1/n_components                         
    
    beta_0 : float, Optional | defaults to None.
        The precision prior on the mean distribution (Gaussian).
        Must be greater than 0.
    
        If None, the value is set to 1.0                         
    
    nu_0 : float, Optional | defaults to None.
        The prior of the number of degrees of freedom on the covariance
        distributions (Wishart). Must be greater than or equal to dim.
    
        If None, the value is set to dim
        
    means_prior : array (dim,), Optional | defaults to None
        The prior value to compute the value of the means.
        
        If None, the value is set to the mean of points_data
        
    cov_wishart_prior : type depends on covariance_type, Optional | defaults to None
        If covariance_type is 'full' type must be array (dim,dim)
        If covariance_type is 'spherical' type must be float
        The prior value to compute the value of the precisions.
        
    pypcoeff : float | defaults to 0
        If 0 the weights are generated according to a Dirichlet Process
        If >0 and <=1 the weights are generated according to a Pitman-Yor
        Process.
    Attributes
    ----------
    
    name : str
        The name of the method: 'DPGMM', or 'PYPGMM' if pypcoeff > 0.
    
    alpha : array of floats (n_components,2)
        Contains the parameters of the weight distribution (Beta)
    
    beta : array of floats (n_components,)
        Contains coefficients which are multiplied with the precision matrices
        to form the precision matrix on the Gaussian distribution of the means.    
    
    nu : array of floats (n_components,)
        Contains the number of degrees of freedom on the distribution of
        covariance matrices.
    
    _inv_prec : array of floats (n_components,dim,dim)
        Contains the equivalent of the matrix W described in Bishop's book. It
        is proportional to cov.
    
    _log_det_inv_prec : array of floats (n_components,)
        Contains the logarithm of the determinant of W matrices.
    
    cov : array of floats (n_components,dim,dim)
        Contains the computed covariance matrices of the mixture.
    
    means : array of floats (n_components,dim)
        Contains the computed means of the mixture.
    
    log_weights : array of floats (n_components,)
        Contains the logarithm of weights of each cluster.
    
    iter : int
        The number of iterations computed with the method fit()
        
    convergence_criterion_data : array of floats (iter,)
        Stores the value of the convergence criterion computed with data
        on which the model is fitted.
    
    convergence_criterion_test : array of floats (iter,) | if _early_stopping only
        Stores the value of the convergence criterion computed with test data
        if it exists.
    
    _is_initialized : bool
        Ensures that the method _initialize() has been used before using other
        methods such as score() or predict_log_assignements().
    
    Raises
    ------
    ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...
    
    References
    ----------
    'Variational Inference for Dirichlet Process Mixtures', D. Blei and M. Jordan
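
    Examples
    --------
    A minimal usage sketch. The import path (``megamix.batch``) and the toy
    data are assumptions for illustration; ``fit`` and ``predict_log_resp``
    are the methods referred to elsewhere in this file::

        import numpy as np
        from megamix.batch import DPVariationalGaussianMixture

        points = np.random.randn(500, 2)                   # toy 2-D data
        model = DPVariationalGaussianMixture(n_components=10, init='kmeans')
        model.fit(points)                                  # provided by BaseMixture
        labels = model.predict_log_resp(points).argmax(axis=1)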
 
    """
    
    def __init__(self, n_components=1,init="kmeans",alpha_0=None,beta_0=None,
                 nu_0=None,means_prior=None,cov_wishart_prior=None,
                 reg_covar=1e-6,type_init='resp',n_jobs=1,pypcoeff=0,
                 boost=None):
        
        super(DPVariationalGaussianMixture, self).__init__()
        
        self.n_components = n_components
        self.covariance_type = "full"
        self.init = init
        self.type_init = type_init
        self.reg_covar = reg_covar
        self.boost = boost
        
        self.alpha_0 = alpha_0
        self.beta_0 = beta_0
        self.nu_0 = nu_0
        self.pypcoeff = pypcoeff
        self._means_prior = means_prior
        self._inv_prec_prior = cov_wishart_prior
        self.n_jobs = n_jobs
        
        self._is_initialized = False
        self.iter = 0
        self.convergence_criterion_data = []
        self.convergence_criterion_test = []
        
        self._check_common_parameters()
        self._check_parameters()
        
        if pypcoeff==0:
            self.name = 'DPGMM'
        else:
            self.name = 'PYPGMM'
    def _check_parameters(self):
        """
        Check the value of the init parameter
        
        """
        
        if self.init not in ['random', 'random_sk', 'plus', 'kmeans', 'AF_KMC', 'GMM', 'VBGMM']:
            raise ValueError("Invalid value for 'init': %s "
                             "'init' should be in "
                             "['random','random_sk','plus','kmeans','AF_KMC','GMM','VBGMM']"
                             % self.init)
            
        if self.pypcoeff < 0 or self.pypcoeff > 1:
            raise ValueError("Invalid value for 'pypcoeff': %s "
                             "'pypcoeff' should be between 0 and 1"
                             % self.pypcoeff)
            
        if self.boost is not None :
            if self.boost < 0:
                raise ValueError("Invalid value for 'boost': %s "
                             "'boost' should be positive"
                             % self.boost)
            
        if self.init == 'random_sk' and self.type_init=='mcw':
            raise ValueError("random_sk is only compatible with "
                             "type_init = resp")
                
          
    def _initialize(self,points_data,points_test=None):
        """
        This method initializes the Variational Gaussian Mixture by setting the values
        of the means, the covariances and other specific parameters (alpha, beta, nu)
        
        Parameters
        ----------
        points_data : an array (n_points,dim)
            Data on which the model is fitted.
        points_test: an array (n_points,dim) | Optional
            Data used to do early stopping (avoid overfitting)
            
        """
        
        n_points,dim = points_data.shape
        self._check_prior_parameters(points_data)
		
        if self.type_init == 'resp':
            log_assignements = initialize_log_assignements(self.init,self.n_components,points_data,
                                                           points_test)
            self._inv_prec = np.empty((self.n_components,dim,dim))
            self._log_det_inv_prec = np.empty(self.n_components)
            self.cov = np.empty((self.n_components,dim,dim))
            self.alpha = np.empty((self.n_components,2))
            self.log_weights = np.empty(self.n_components)
            self._step_M(points_data,log_assignements)
            
            # Boosting covariance matrices
            if self.boost is not None:
                self.cov *= self.boost
                self._inv_prec *= self.boost
                self._log_det_inv_prec += dim * np.log(self.boost)
        
        elif self.type_init == 'mcw':
            #Means, covariances and weights
            means,cov,log_weights = initialize_mcw(self.init,self.n_components,points_data,
                                                   points_test)
            self.cov = cov
            self.means = means
            self.log_weights = log_weights
            
            # Hyper parameters
            N = np.exp(log_weights) * n_points
            self.alpha = np.asarray([1 + N,
                          self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0))]).T
            self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components),
                                      self.pypcoeff * np.arange(self.n_components)]).T
            self.beta = self.beta_0 + N
            self.nu = self.nu_0 + N
            
            # Matrix W
            self._inv_prec = cov * self.nu[:,np.newaxis,np.newaxis]
            self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
            
        elif self.init == 'user':
            
            if self.type_init=='kmeans':
                self._initialize_cov(points_data)
            
            # Hyper parameters
            N = np.exp(self.log_weights) * n_points
            self.alpha = np.asarray([1 + N,
                          self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0)) ]).T
            self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components),
                                      self.pypcoeff * np.arange(self.n_components)]).T
            self.beta = self.beta_0 + N
            self.nu = self.nu_0 + N
            
            # Matrix W
            self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
            self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
            
            
        self._is_initialized = True
        
    def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each points in each cluster
        
        Parameters
        ----------
        points : an array (n_points,dim)
        
        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points
            
        """
        
        n_points,dim = points.shape
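        # A sketch of what follows (cf. Bishop ch. 10 / Blei & Jordan): the
        # unnormalized log-responsibility of point n for component k combines
        # E_q[log pi_k] (self.log_weights), the Gaussian log-density at the
        # current covariance, half the expected log-determinant of the
        # precision (digamma sum + dim*log 2) and a -dim/(2*beta_k) correction
        # for the uncertainty on the mean; logsumexp then normalizes per point.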
        
        log_gaussian = _log_normal_matrix(points,self.means,self.cov,'full',self.n_jobs)
        log_gaussian -= 0.5 * dim * np.log(self.nu)
        digamma_sum = np.sum(psi(.5 * (self.nu - np.arange(0, dim)[:,np.newaxis])),0)
        log_lambda = digamma_sum + dim * np.log(2)
        
        log_prob = self.log_weights + log_gaussian + 0.5 * (log_lambda - dim / self.beta)
        
        log_prob_norm = logsumexp(log_prob, axis=1)
        log_resp = log_prob - log_prob_norm[:,np.newaxis]
                    
        return log_prob_norm,log_resp
    
    def _step_M(self,points,log_resp):
        """
        In this step the algorithm updates the values of the parameters (means, covariances,
        alpha, beta, nu).
        
        Parameters
        ----------
        points : an array (n_points,dim)
        
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
            
        """
        
        n_points,dim = points.shape
        
        resp = np.exp(log_resp)
        
        # Convenient statistics
        N = np.sum(resp,axis=0) + 10*np.finfo(resp.dtype).eps            #Array (n_components,)
        X_barre = 1/N[:,np.newaxis] * np.dot(resp.T,points)              #Array (n_components,dim)
        S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs)
        
        #Parameters update
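        # Beta (stick-breaking) posterior parameters (Blei & Jordan):
        #   gamma_{k,1} = 1 + N_k,   gamma_{k,2} = alpha_0 + sum_{j>k} N_j.
        # The reversed cumulative sum below is a vectorized way to get the
        # tail sums (with 0 for the last component); the extra pypcoeff terms
        # turn the Dirichlet Process into a Pitman-Yor process when > 0.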
        self.alpha = np.asarray([1.0 + N,
                                  self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0))]).T
        self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components),
                                      self.pypcoeff * np.arange(self.n_components)]).T
        self.beta = self.beta_0 + N
        self.nu = self.nu_0 + N
        
        # Weights update
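        # Under stick-breaking, E_q[log pi_k] = E[log V_k] + sum_{j<k} E[log(1-V_j)],
        # with E[log V_k] = psi(alpha[k,0]) - psi(alpha[k,0]+alpha[k,1]) and
        # E[log(1-V_j)] = psi(alpha[j,1]) - psi(alpha[j,0]+alpha[j,1]);
        # the loop below accumulates that sum through log_weights[i-1].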
        for i in range(self.n_components):
            if i==0:
                self.log_weights[i] = psi(self.alpha[i][0]) - psi(np.sum(self.alpha[i]))
            else:
                self.log_weights[i] = psi(self.alpha[i][0]) - psi(np.sum(self.alpha[i]))
                self.log_weights[i] += self.log_weights[i-1] + psi(self.alpha[i-1][1]) - psi(self.alpha[i-1][0])
        
        # Means update
        means = self.beta_0 * self._means_prior + N[:,np.newaxis] * X_barre
        self.means = means * np.reciprocal(self.beta)[:,np.newaxis]
        self.means_estimated = self.means
        
        # Covariance update
        for i in range(self.n_components):
            diff = X_barre[i] - self._means_prior
            product = self.beta_0 * N[i]/self.beta[i] * np.outer(diff,diff)
            self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product
            
            det_inv_prec = np.linalg.det(self._inv_prec[i])
            self._log_det_inv_prec[i] = np.log(det_inv_prec)
            self.cov[i] = self._inv_prec[i] / self.nu[i]
        
    def _convergence_criterion_simplified(self,points,log_resp,log_prob_norm):
        """
        Compute the lower bound of the likelihood using the simplified Blei and
        Jordan formula. Can only be used with data which fits the model.
        
        
        Parameters
        ----------
        points : an array (n_points,dim)
        
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
            
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points
        
        Returns
        -------
        result : float
            the lower bound of the likelihood
            
        """
        
        resp = np.exp(log_resp)
        n_points,dim = points.shape
        
        prec = np.linalg.inv(self._inv_prec)
        prec_prior = np.linalg.inv(self._inv_prec_prior)
        
        lower_bound = np.zeros(self.n_components)
        
        for i in range(self.n_components):
            
            lower_bound[i] = _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
            
            resp_i = resp[:,i:i+1]
            log_resp_i = log_resp[:,i:i+1]
            
            lower_bound[i] -= np.sum(resp_i*log_resp_i)
            lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
        
        result = np.sum(lower_bound)
        result -= self.n_components * betaln(1,self.alpha_0)
        result += np.sum(betaln(self.alpha.T[0],self.alpha.T[1]))
        result -= n_points * dim * 0.5 * np.log(2*np.pi)
        
        return result
    
    
    def _convergence_criterion(self,points,log_resp,log_prob_norm):
        """
        Compute the lower bound of the likelihood using the Blei and Jordan formula.
        The formula cannot be simplified (as it is done in scikit-learn) as we also
        use it to calculate the lower bound of test points, in this case no
        simplification can be done.
          
        
        Parameters
        ----------
        points : an array (n_points,dim)
        
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
            
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points
        
        Returns
        -------
        result : float
            the lower bound of the likelihood
            
        """
        
        resp = np.exp(log_resp)
        n_points,dim = points.shape
        
        # Convenient statistics
        N = np.sum(resp,axis=0) + 10*np.finfo(resp.dtype).eps               #Array (n_components,)
        X_barre = 1/N[:,np.newaxis] * np.dot(resp.T,points)                 #Array (n_components,dim)
        S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs)
             
        prec = np.linalg.inv(self._inv_prec)
        prec_prior = np.linalg.inv(self._inv_prec_prior)
        
        lower_bound = np.zeros(self.n_components)
        
        for i in range(self.n_components):
            
            digamma_sum = np.sum(psi(.5 * (self.nu[i] - np.arange(0, dim)[:,np.newaxis])),0)
            log_det_prec_i = digamma_sum + dim * np.log(2) - self._log_det_inv_prec[i] #/!\ Inverse
            
            #First line
            lower_bound[i] = log_det_prec_i - dim/self.beta[i] - self.nu[i]*np.trace(np.dot(S[i],prec[i]))
            diff = X_barre[i] - self.means[i]
            lower_bound[i] += -self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
            lower_bound[i] *= 0.5 * N[i]
            
            #Second line
            lower_bound[i] += _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
            
            resp_i = resp[:,i:i+1]
            log_resp_i = log_resp[:,i:i+1]
            
            lower_bound[i] -= np.sum(resp_i*log_resp_i)
            lower_bound[i] += 0.5 * (self.nu_0 - self.nu[i]) * log_det_prec_i
            lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
            lower_bound[i] += dim*0.5*(1 - self.beta_0/self.beta[i] + self.nu[i])
            
            #Third line without the last term which is not summed
            diff = self.means[i] - self._means_prior
            lower_bound[i] += -0.5*self.beta_0*self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
            lower_bound[i] += -0.5*self.nu[i]*np.trace(np.dot(self._inv_prec_prior,prec[i]))
            
            #Terms with alpha
            lower_bound[i] += (N[i] + 1.0 - self.alpha[i,0]) * (psi(self.alpha[i,0]) - psi(np.sum(self.alpha[i])))
            lower_bound[i] += (np.sum(N[i+1::]) + self.alpha_0 - self.alpha[i,1]) * (psi(self.alpha[i,1]) - psi(np.sum(self.alpha[i])))
        
        result = np.sum(lower_bound)
        result -= self.n_components * betaln(1,self.alpha_0)
        result += np.sum(betaln(self.alpha.T[0],self.alpha.T[1]))
        result -= n_points * dim * 0.5 * np.log(2*np.pi)
        
        return result
    
    def _get_parameters(self):
        return (self.log_weights, self.means, self.cov,
                self.alpha, self.beta, self.nu)
    
    def _set_parameters(self, params,verbose=True):
        (self.log_weights, self.means, self.cov,
        self.alpha, self.beta, self.nu )= params
         
        # Matrix W
        self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
        self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
        if self.n_components != len(self.means) and verbose:
            print('The number of components changed')
        self.n_components = len(self.means)
        
    def _limiting_model(self,points):
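        """
        Restrict the model to the components that are the most probable
        assignment of at least one point, and return the corresponding
        parameters (log_weights, means, cov, alpha, beta, nu).
        """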
        
        n_points,dim = points.shape
        log_resp = self.predict_log_resp(points)
        _,n_components = log_resp.shape
    
        exist = np.zeros(n_components)
        
        for i in range(n_points):
            for j in range(n_components):
                if np.argmax(log_resp[i])==j:
                    exist[j] = 1
        
        idx_existing = np.where(exist==1)
        
        log_weights = self.log_weights[idx_existing]
        means = self.means[idx_existing]
        cov = self.cov[idx_existing]
        alpha = self.alpha[idx_existing]
        beta = self.beta[idx_existing]
        nu = self.nu[idx_existing]
        
        params = (log_weights, means, cov,
                  alpha, beta, nu)
        
        return params | 
	apache-2.0 | 5,893,400,732,939,769,000 | 37.733583 | 137 | 0.544565 | false | 
| 
	sgarrity/bedrock | 
	lib/l10n_utils/management/commands/fluent.py | 
	8 | 
	3597 | 
	# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pathlib import Path
import textwrap
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'Convert a template to use Fluent for l10n'
    requires_system_checks = False
    def add_arguments(self, parser):
        subparsers = parser.add_subparsers(
            title='subcommand', dest='subcommand'
        )
        subparsers.add_parser('help')
        recipe_parser = subparsers.add_parser(
            'recipe',
            description='Create migration recipe from template'
        )
        recipe_parser.add_argument('template', type=Path)
        ftl_parser = subparsers.add_parser(
            'ftl',
            description='Create Fluent file with existing recipe'
        )
        ftl_parser.add_argument(
            'recipe_or_template', type=Path,
            help='Path to the recipe or the template from which the recipe was generated'
        )
        ftl_parser.add_argument(
            'locales', nargs='*', default=['en'], metavar='ab-CD',
            help='Locale codes to create ftl files for'
        )
        template_parser = subparsers.add_parser(
            'template',
            description='Create template_ftl.html file with existing recipe'
        )
        template_parser.add_argument('template', type=Path)
        activation_parser = subparsers.add_parser(
            'activation',
            description='Port activation data from .lang for a recipe/template'
        )
        activation_parser.add_argument(
            'recipe_or_template', type=Path,
            help='Path to the recipe or the template from which the recipe was generated'
        )
    def handle(self, subcommand, **kwargs):
        if subcommand == 'recipe':
            return self.create_recipe(**kwargs)
        if subcommand == 'ftl':
            return self.create_ftl(**kwargs)
        if subcommand == 'template':
            return self.create_template(**kwargs)
        if subcommand == 'activation':
            return self.activation(**kwargs)
        return self.handle_help(**kwargs)
    def handle_help(self, **kwargs):
        self.stdout.write(textwrap.dedent('''\
            To migrate a template from .lang to Fluent, use the subcommands like so
            ./manage.py fluent recipe bedrock/app/templates/app/some.html
            # edit IDs in lib/fluent_migrations/app/some.py
            ./manage.py fluent template bedrock/app/templates/app/some.html
            ./manage.py fluent ftl bedrock/app/templates/app/some.html
            More documentation on https://bedrock.readthedocs.io/en/latest/fluent-conversion.html.
        '''))
    def create_recipe(self, template, **kwargs):
        from ._fluent_recipe import Recipe
        recipe = Recipe(self)
        recipe.handle(template)
    def create_template(self, template, **kwargs):
        from ._fluent_templater import Templater
        templater = Templater(self)
        templater.handle(template)
    def create_ftl(self, recipe_or_template, locales, **kwargs):
        from ._fluent_ftl import FTLCreator
        ftl_creator = FTLCreator(self)
        for locale in locales:
            ftl_creator.handle(recipe_or_template, locale)
    def activation(self, recipe_or_template, **kwargs):
        from ._fluent_activation import Activation
        activation = Activation(self)
        activation.handle(recipe_or_template)
 | 
	mpl-2.0 | -920,462,829,349,169,400 | 35.333333 | 98 | 0.627189 | false | 
| 
	kevinmel2000/brython | 
	www/src/Lib/test/unittests/test_cgitb.py | 
	113 | 
	2551 | 
	from test.support import run_unittest
from test.script_helper import assert_python_failure, temp_dir
import unittest
import sys
import cgitb
class TestCgitb(unittest.TestCase):
    def test_fonts(self):
        text = "Hello Robbie!"
        self.assertEqual(cgitb.small(text), "<small>{}</small>".format(text))
        self.assertEqual(cgitb.strong(text), "<strong>{}</strong>".format(text))
        self.assertEqual(cgitb.grey(text),
                         '<font color="#909090">{}</font>'.format(text))
    def test_blanks(self):
        self.assertEqual(cgitb.small(""), "")
        self.assertEqual(cgitb.strong(""), "")
        self.assertEqual(cgitb.grey(""), "")
    def test_html(self):
        try:
            raise ValueError("Hello World")
        except ValueError as err:
            # If the html was templated we could do a bit more here.
            # At least check that we get details on what we just raised.
            html = cgitb.html(sys.exc_info())
            self.assertIn("ValueError", html)
            self.assertIn(str(err), html)
    def test_text(self):
        try:
            raise ValueError("Hello World")
        except ValueError as err:
            text = cgitb.text(sys.exc_info())
            self.assertIn("ValueError", text)
            self.assertIn("Hello World", text)
    def test_syshook_no_logdir_default_format(self):
        with temp_dir() as tracedir:
            rc, out, err = assert_python_failure(
                  '-c',
                  ('import cgitb; cgitb.enable(logdir=%s); '
                   'raise ValueError("Hello World")') % repr(tracedir))
        out = out.decode(sys.getfilesystemencoding())
        self.assertIn("ValueError", out)
        self.assertIn("Hello World", out)
        # By default we emit HTML markup.
        self.assertIn('<p>', out)
        self.assertIn('</p>', out)
    def test_syshook_no_logdir_text_format(self):
        # Issue 12890: we were emitting the <p> tag in text mode.
        with temp_dir() as tracedir:
            rc, out, err = assert_python_failure(
                  '-c',
                  ('import cgitb; cgitb.enable(format="text", logdir=%s); '
                   'raise ValueError("Hello World")') % repr(tracedir))
        out = out.decode(sys.getfilesystemencoding())
        self.assertIn("ValueError", out)
        self.assertIn("Hello World", out)
        self.assertNotIn('<p>', out)
        self.assertNotIn('</p>', out)
def test_main():
    run_unittest(TestCgitb)
if __name__ == "__main__":
    test_main()
 | 
	bsd-3-clause | -8,993,424,027,276,596,000 | 35.442857 | 80 | 0.573501 | false | 
| 
	rhololkeolke/apo-website-devin | 
	src/application/facebook/facebook.py | 
	2 | 
	8731 | 
	"""
This module contains helper classes and methods
for the facebook integration module
.. module:: application.facebook.facebook
.. moduleauthor:: Devin Schwab <[email protected]>
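
Typical use, as a sketch (``token`` is assumed to be an AccessTokenModel
entity carrying a valid ``access_token``; the album name is purely
illustrative)::

    albums = AlbumList(token)
    named = albums.get_albums_by_name(['Rush Week'])
    photos = named['Rush Week'].get_photos()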
"""
import facebooksdk as fb
import models
from flask import flash
class AlbumList(object):
    def __init__(self, token):
        """
        Given an access token this class
        will get all albums for the object associated with the token
        (i.e. a page or a user).
        It will lazily construct an Album instance for each of
        the album ids returned
        """
        
        self.graph = fb.GraphAPI(token.access_token)
        albums_data = self.graph.get_connections('me', 'albums')['data']
        self.album_ids = {}
        self.album_names = {}
        for data in albums_data:
            self.album_ids[data['id']] = data
            self.album_names[data['name']] = data
    def get_albums_by_name(self, names):
        """
        Given a list of names this method will
        return album objects for each matching name.
        If a name is not found then it is silently ignored.
        This method returns a dictionary mapping name
        to Album object.
        """
        albums = {}
        for name in names:
            if name in self.album_names:
                if isinstance(self.album_names[name], Album):
                    albums[name] = self.album_names[name]
                else:
                    self.album_names[name] = Album(graph=self.graph,
                                                   album_data=self.album_names[name])
                    self.album_ids[self.album_names[name].me] = self.album_names[name]
                    albums[name] = self.album_names[name]
        return albums
    def get_albums_by_id(self, ids):
        """
        Given a list of ids this method will
        return album objects for each matching id.
        If an id is not found then it is silently ignored.
        This method returns a dictionary mapping id to
        Album object
        """
        albums = {}
        for album_id in ids:
            if album_id in self.album_ids:
                if isinstance(self.album_ids[album_id], Album):
                    albums[album_id] = self.album_ids[album_id]
                else:
                    self.album_ids[album_id] = Album(graph=self.graph,
                                                     album_data=self.album_ids[album_id])
                    self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]
                    albums[album_id] = self.album_ids[album_id]
        return albums
        
    def get_all_albums_by_id(self):
        """
        This method returns a dictionary of all
        albums with album ids as the keys
        """
        for album_id in self.album_ids:
            if not isinstance(self.album_ids[album_id], Album):
                self.album_ids[album_id] = Album(graph=self.graph,
                                                 album_data=self.album_ids[album_id])
                self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]
        return self.album_ids
    def get_all_albums_by_name(self):
        """
        This method returns a dictionary of all
        albums with album names as the keys
        """
        for name in self.album_names:
            if not isinstance(self.album_names[name], Album):
                self.album_names[name] = Album(graph=self.graph,
                                               album_data=self.album_names[name])
                self.album_ids[self.album_names[name].me] = self.album_names[name]
        return self.album_names
                
        
class Album(object):
    def __init__(self, graph=None, token=None, album_id=None, album_data=None):
        """
        Initializes a new Album object.
        If graph is provided then the graph object is saved to this
        instance.
        If the token is provided then the graph object for this token
        is created and saved to this instance.
        If both are None then an error is raised.
        If album_id is provided then the graph object is queried
        for the id and the album object populates itself with this data.
        If album_data is provided then the album object is populated
        with the data in the JSON-derived object.
        If both are None then an error is raised
        """
        if graph is None and token is None:
            raise TypeError("Either a graph object must be provided or a token must be provided")
        if graph is not None:
            self.graph = graph
            query = models.AccessTokenModel.all()
            query.filter('access_token =', graph.access_token)
            try:
                self.token = query.fetch(1)[0]
            except IndexError:
                raise TypeError('The token object provided was not an AccessTokenModel instance')
        else:
            self.graph = fb.GraphAPI(token.access_token)
            self.token = token
        if album_id is None and album_data is None:
            raise TypeError("Either an album id or album data must be provided")
        if album_id is not None:
            album_data = self.graph.get_object(album_id)
        self.me = album_data['id']
        self.name = album_data['name']
        self.desc = album_data.get('description', None)
        self.count = album_data.get('count', 0)
        if 'cover_photo' in album_data:
            self.cover_photo = Photo(self.me, graph=self.graph, photo_id=album_data['cover_photo']).thumbnail
        else:
            self.cover_photo = None
            
    def get_model(self):
        query = models.AlbumModel.all()
        query.filter('me =', self.me)
        try:
            return  query.fetch(1)[0]
        except IndexError:
            cover_thumb = None
            if self.cover_photo is not None:
                cover_thumb = self.cover_photo
            entity = models.AlbumModel(me=self.me,
                                       token=self.token,
                                       name=self.name,
                                       desc=self.desc,
                                       cover_photo=cover_thumb)
            entity.put()
            return entity
    def get_photos(self):
        """
        Get a list of Photo objects
        """
        photos_data = self.graph.get_connections(self.me, 'photos')['data']
        
        photos = []
        for photo_data in photos_data:
            query = models.PhotoModel.all()
            query.filter('me =', photo_data['id'])
            try:
                photos.append(query.fetch(1)[0])
            except IndexError:
                name = None
                if 'name' in photo_data:
                    name = photo_data['name']
                orig = photo_data['images'][0]['source']
            
                entity = models.PhotoModel(me=photo_data['id'],
                                           album_id=self.me,
                                           name=name,
                                           thumbnail=photo_data['picture'],
                                           original=orig)
                entity.put()
                photos.append(entity)
        
        return photos
            
class Photo(object):
    def __init__(self, album_id, graph=None, token=None, photo_id=None, photo_data=None):
        if graph is None and token is None:
            raise TypeError("Either a graph object must be provided or a token must be provided")
        if graph is not None:
            self.graph = graph
        else:
            self.graph = fb.GraphAPI(token.access_token)
        if photo_id is None and photo_data is None:
            raise TypeError("Either a photo id or photo data must be provided")
        if photo_id is not None:
            photo_data = self.graph.get_object(photo_id)
        self.me = photo_data['id']
        self.name = photo_data.get('name', None)
        self.thumbnail = photo_data['picture']
        self.original = photo_data['images'][0]['source']
        self.album_id = album_id
    def get_model(self):
        query = models.PhotoModel.all()
        query.filter('me =', self.me)
        try:
            return query.fetch(1)[0]
        except IndexError:
            entity = models.PhotoModel(me=self.me,
                                       album_id=self.album_id,
                                       name=self.name,
                                       thumbnail=self.thumbnail,
                                       original=self.original)
            entity.put()
            return entity
             | 
	bsd-3-clause | 2,272,791,862,150,649,300 | 33.928 | 109 | 0.529607 | false | 
| 
	korotkyn/ibis | 
	ibis/expr/tests/test_temporal.py | 
	9 | 
	5955 | 
	# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.common import IbisError
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.temporal as T
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
class TestFixedOffsets(unittest.TestCase):
    def setUp(self):
        self.con = MockConnection()
        self.table = self.con.table('alltypes')
    def test_upconvert(self):
        cases = [
            (T.day(14), 'w', T.week(2)),
            (T.hour(72), 'd', T.day(3)),
            (T.minute(240), 'h', T.hour(4)),
            (T.second(360), 'm', T.minute(6)),
            (T.second(3 * 86400), 'd', T.day(3)),
            (T.millisecond(5000), 's', T.second(5)),
            (T.microsecond(5000000), 's', T.second(5)),
            (T.nanosecond(5000000000), 's', T.second(5)),
        ]
        for offset, unit, expected in cases:
            result = offset.to_unit(unit)
            assert result.equals(expected)
    def test_multiply(self):
        offset = T.day(2)
        assert (offset * 2).equals(T.day(4))
        assert (offset * (-2)).equals(T.day(-4))
        assert (3 * offset).equals(T.day(6))
        assert ((-3) * offset).equals(T.day(-6))
    def test_repr(self):
        assert repr(T.day()) == '<Timedelta: 1 day>'
        assert repr(T.day(2)) == '<Timedelta: 2 days>'
        assert repr(T.year()) == '<Timedelta: 1 year>'
        assert repr(T.month(2)) == '<Timedelta: 2 months>'
        assert repr(T.second(40)) == '<Timedelta: 40 seconds>'
    def test_cannot_upconvert(self):
        cases = [
            (T.day(), 'w'),
            (T.hour(), 'd'),
            (T.minute(), 'h'),
            (T.second(), 'm'),
            (T.second(), 'd'),
            (T.millisecond(), 's'),
            (T.microsecond(), 's'),
            (T.nanosecond(), 's'),
        ]
        for delta, target in cases:
            self.assertRaises(IbisError, delta.to_unit, target)
    def test_downconvert_second_parts(self):
        K = 2
        sec = T.second(K)
        milli = T.millisecond(K)
        micro = T.microsecond(K)
        nano = T.nanosecond(K)
        cases = [
            (sec.to_unit('s'), T.second(K)),
            (sec.to_unit('ms'), T.millisecond(K * 1000)),
            (sec.to_unit('us'), T.microsecond(K * 1000000)),
            (sec.to_unit('ns'), T.nanosecond(K * 1000000000)),
            (milli.to_unit('ms'), T.millisecond(K)),
            (milli.to_unit('us'), T.microsecond(K * 1000)),
            (milli.to_unit('ns'), T.nanosecond(K * 1000000)),
            (micro.to_unit('us'), T.microsecond(K)),
            (micro.to_unit('ns'), T.nanosecond(K * 1000)),
            (nano.to_unit('ns'), T.nanosecond(K))
        ]
        self._check_cases(cases)
    def test_downconvert_hours(self):
        K = 2
        offset = T.hour(K)
        cases = [
            (offset.to_unit('h'), T.hour(K)),
            (offset.to_unit('m'), T.minute(K * 60)),
            (offset.to_unit('s'), T.second(K * 3600)),
            (offset.to_unit('ms'), T.millisecond(K * 3600000)),
            (offset.to_unit('us'), T.microsecond(K * 3600000000)),
            (offset.to_unit('ns'), T.nanosecond(K * 3600000000000))
        ]
        self._check_cases(cases)
    def test_downconvert_day(self):
        K = 2
        week = T.week(K)
        day = T.day(K)
        cases = [
            (week.to_unit('d'), T.day(K * 7)),
            (week.to_unit('h'), T.hour(K * 7 * 24)),
            (day.to_unit('d'), T.day(K)),
            (day.to_unit('h'), T.hour(K * 24)),
            (day.to_unit('m'), T.minute(K * 1440)),
            (day.to_unit('s'), T.second(K * 86400)),
            (day.to_unit('ms'), T.millisecond(K * 86400000)),
            (day.to_unit('us'), T.microsecond(K * 86400000000)),
            (day.to_unit('ns'), T.nanosecond(K * 86400000000000))
        ]
        self._check_cases(cases)
    def test_combine_with_different_kinds(self):
        cases = [
            (T.day() + T.minute(), T.minute(1441)),
            (T.second() + T.millisecond(10), T.millisecond(1010)),
            (T.hour() + T.minute(5) + T.second(10), T.second(3910))
        ]
        self._check_cases(cases)
    def test_timedelta_generic_api(self):
        cases = [
            (T.timedelta(weeks=2), T.week(2)),
            (T.timedelta(days=3), T.day(3)),
            (T.timedelta(hours=4), T.hour(4)),
            (T.timedelta(minutes=5), T.minute(5)),
            (T.timedelta(seconds=6), T.second(6)),
            (T.timedelta(milliseconds=7), T.millisecond(7)),
            (T.timedelta(microseconds=8), T.microsecond(8)),
            (T.timedelta(nanoseconds=9), T.nanosecond(9)),
        ]
        self._check_cases(cases)
    def _check_cases(self, cases):
        for x, y in cases:
            assert x.equals(y)
    def test_offset_timestamp_expr(self):
        c = self.table.i
        x = T.timedelta(days=1)
        expr = x + c
        assert isinstance(expr, ir.TimestampArray)
        assert isinstance(expr.op(), ops.TimestampDelta)
        # test radd
        expr = c + x
        assert isinstance(expr, ir.TimestampArray)
        assert isinstance(expr.op(), ops.TimestampDelta)
class TestTimedelta(unittest.TestCase):
    def test_compound_offset(self):
        # These are not yet allowed (e.g. 1 month + 1 hour)
        pass
    def test_offset_months(self):
        pass
 | 
	apache-2.0 | 9,129,429,547,262,664,000 | 31.900552 | 74 | 0.537196 | false | 
| 
	hectord/lettuce | 
	tests/integration/lib/Django-1.2.5/django/core/handlers/base.py | 
	44 | 
	9926 | 
	import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
        http.fix_IE_for_attach,
        http.fix_IE_for_vary,
    ]
    def __init__(self):
        self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__).
        """
        from django.conf import settings
        from django.core import exceptions
        self._view_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
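        # settings.MIDDLEWARE_CLASSES is a tuple of dotted paths; a sketch of
        # a typical Django 1.2-era value (exact entries depend on the project):
        #
        #     MIDDLEWARE_CLASSES = (
        #         'django.middleware.common.CommonMiddleware',
        #         'django.contrib.sessions.middleware.SessionMiddleware',
        #     )
        #
        # Each path is split into module + class name and instantiated below.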
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            try:
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = import_module(mw_module)
            except ImportError, e:
                raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                continue
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        from django.core import exceptions, urlresolvers
        from django.conf import settings
        try:
            try:
                # Setup default url resolver for this thread.
                urlconf = settings.ROOT_URLCONF
                urlresolvers.set_urlconf(urlconf)
                resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                # Apply request middleware
                for middleware_method in self._request_middleware:
                    response = middleware_method(request)
                    if response:
                        return response
                if hasattr(request, "urlconf"):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                callback, callback_args, callback_kwargs = resolver.resolve(
                        request.path_info)
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        return response
                try:
                    response = callback(request, *callback_args, **callback_kwargs)
                except Exception, e:
                    # If the view raised an exception, run it through exception
                    # middleware, and if the exception middleware returns a
                    # response, use that. Otherwise, reraise the exception.
                    for middleware_method in self._exception_middleware:
                        response = middleware_method(request, e)
                        if response:
                            return response
                    raise
                # Complain if the view returned None (a common error).
                if response is None:
                    try:
                        view_name = callback.func_name # If it's a function
                    except AttributeError:
                        view_name = callback.__class__.__name__ + '.__call__' # If it's a class
                    raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
                return response
            except http.Http404, e:
                if settings.DEBUG:
                    from django.views import debug
                    return debug.technical_404_response(request, e)
                else:
                    try:
                        callback, param_dict = resolver.resolve404()
                        return callback(request, **param_dict)
                    except:
                        try:
                            return self.handle_uncaught_exception(request, resolver, sys.exc_info())
                        finally:
                            receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
            except exceptions.PermissionDenied:
                return http.HttpResponseForbidden('<h1>Permission denied</h1>')
            except SystemExit:
                # Allow sys.exit() to actually exit. See tickets #1023 and #4701
                raise
            except: # Handle everything else, including SuspiciousOperation, etc.
                # Get the exception info now, in case another exception is thrown later.
                receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
                return self.handle_uncaught_exception(request, resolver, sys.exc_info())
        finally:
            # Reset URLconf for this thread on the way out for complete
            # isolation of request.urlconf
            urlresolvers.set_urlconf(None)
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        from django.conf import settings
        from django.core.mail import mail_admins
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        if settings.DEBUG:
            from django.views import debug
            return debug.technical_500_response(request, *exc_info)
        # When DEBUG is False, send an error message to the admins.
        subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
        try:
            request_repr = repr(request)
        except:
            request_repr = "Request repr() unavailable"
        message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
        mail_admins(subject, message, fail_silently=True)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            raise exc_info[1], None, exc_info[2]
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)
    def _get_traceback(self, exc_info=None):
        "Helper function to return the traceback as a string"
        import traceback
        return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
    anything).
    """
    from django.conf import settings
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_unicode(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = environ.get('SCRIPT_URL', u'')
    if not script_url:
        script_url = environ.get('REDIRECT_URL', u'')
    if script_url:
        return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
    return force_unicode(environ.get('SCRIPT_NAME', u''))
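# Illustrative sketch (not part of Django): how the SCRIPT_URL/PATH_INFO slicing
# above recovers the script name the client saw before Apache mod_rewrite rewrote
# the URL. The environ values are hypothetical.
def _demo_script_name():
    environ = {
        'SCRIPT_URL': '/prefix/app/page/',
        'PATH_INFO': '/page/',
        'SCRIPT_NAME': '/app',
    }
    script_url = environ.get('SCRIPT_URL', u'')
    if script_url:
        # Strip the trailing PATH_INFO to get '/prefix/app'
        return script_url[:-len(environ.get('PATH_INFO', ''))]
    return environ.get('SCRIPT_NAME', u'')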
 | 
	gpl-3.0 | 7,864,434,311,480,931,000 | 44.118182 | 143 | 0.598428 | false | 
| 
	ARM-software/mbed-beetle | 
	tools/host_tests/host_tests_plugins/module_copy_smart.py | 
	2 | 
	4378 | 
	"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from os.path import join, basename, exists, abspath, dirname
from time import sleep
from host_test_plugins import HostTestPluginBase
sys.path.append(abspath(join(dirname(__file__), "../../../")))
from tools.test_api import get_autodetected_MUTS_list
class HostTestPluginCopyMethod_Smart(HostTestPluginBase):
    # Plugin interface
    name = 'HostTestPluginCopyMethod_Smart'
    type = 'CopyMethod'
    stable = True
    capabilities = ['smart']
    required_parameters = ['image_path', 'destination_disk', 'target_mcu']
    def setup(self, *args, **kwargs):
        """ Configure plugin, this function should be called before plugin execute() method is used.
        """
        return True
    def execute(self, capability, *args, **kwargs):
        """ Executes capability by name.
            Each capability may directly call some command line
            program or execute a Python function.
        """
        result = False
        if self.check_parameters(capability, *args, **kwargs) is True:
            image_path = kwargs['image_path']
            destination_disk = kwargs['destination_disk']
            target_mcu = kwargs['target_mcu']
            # Wait for mount point to be ready
            self.check_mount_point_ready(destination_disk)  # Blocking
            # Prepare correct command line parameter values
            image_base_name = basename(image_path)
            destination_path = join(destination_disk, image_base_name)
            if capability == 'smart':
                if os.name == 'posix':
                    cmd = ['cp', image_path, destination_path]
                    result = self.run_command(cmd, shell=False)
                    cmd = ['sync']
                    result = self.run_command(cmd, shell=False)
                elif os.name == 'nt':
                    cmd = ['copy', image_path, destination_path]
                    result = self.run_command(cmd, shell=True)
                # Give the OS and filesystem time to settle down
                sleep(3)
                platform_name_filter = [target_mcu]
                muts_list = {}
                remount_complete = False
                for i in range(0, 60):
                    print('Looking for %s with MBEDLS' % target_mcu)
                    muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
                    if 1 in muts_list:
                        mut = muts_list[1]
                        destination_disk = mut['disk']
                        destination_path = join(destination_disk, image_base_name)
                        if mut['mcu'] == 'LPC1768' or mut['mcu'] == 'LPC11U24':
                            if exists(destination_disk) and exists(destination_path):
                                remount_complete = True
                                break
                        else:
                            if exists(destination_disk) and not exists(destination_path):
                                remount_complete = True
                                break
                    sleep(1)
                if remount_complete:
                    print('Remount complete')
                else:
                    print('Remount FAILED')
                    if exists(destination_disk):
                        print('Disk exists')
                    else:
                        print('Disk does not exist')
                    if exists(destination_path):
                        print('Image exists')
                    else:
                        print('Image does not exist')
                    result = None
        return result
def load_plugin():
    """ Returns plugin available in this module
    """
    return HostTestPluginCopyMethod_Smart()
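# Illustrative usage sketch (hypothetical image path, mount point and target MCU;
# not part of the plugin). The host test framework normally calls setup() once and
# then execute() with the 'smart' capability and the required keyword parameters.
if __name__ == '__main__':
    plugin = load_plugin()
    plugin.setup()
    result = plugin.execute('smart',
                            image_path='/tmp/test_image.bin',    # hypothetical
                            destination_disk='/media/DAPLINK',   # hypothetical
                            target_mcu='K64F')                   # hypothetical
    print('Copy result: %s' % result)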
 | 
	apache-2.0 | 6,523,961,272,627,055,000 | 36.101695 | 101 | 0.553906 | false | 
| 
	ahmadio/edx-platform | 
	lms/lib/courseware_search/lms_filter_generator.py | 
	58 | 
	5634 | 
	"""
This file contains an implementation override of SearchFilterGenerator which will allow
    * Filter by all courses in which the user is enrolled
"""
from microsite_configuration import microsite
from student.models import CourseEnrollment
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from search.filter_generator import SearchFilterGenerator
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
from courseware.access import get_user_role
INCLUDE_SCHEMES = [CohortPartitionScheme, RandomUserPartitionScheme, ]
SCHEME_SUPPORTS_ASSIGNMENT = [RandomUserPartitionScheme, ]
class LmsSearchFilterGenerator(SearchFilterGenerator):
    """ SearchFilterGenerator for LMS Search """
    _user_enrollments = {}
    def _enrollments_for_user(self, user):
        """ Return the specified user's course enrollments """
        if user not in self._user_enrollments:
            self._user_enrollments[user] = CourseEnrollment.enrollments_for_user(user)
        return self._user_enrollments[user]
    def filter_dictionary(self, **kwargs):
        """ LMS implementation, adds filtering by user partition, course id and user """
        def get_group_for_user_partition(user_partition, course_key, user):
            """ Returns the specified user's group for user partition """
            if user_partition.scheme in SCHEME_SUPPORTS_ASSIGNMENT:
                return user_partition.scheme.get_group_for_user(
                    course_key,
                    user,
                    user_partition,
                    assign=False,
                )
            else:
                return user_partition.scheme.get_group_for_user(
                    course_key,
                    user,
                    user_partition,
                )
        def get_group_ids_for_user(course, user):
            """ Collect user partition group ids for user for this course """
            partition_groups = []
            for user_partition in course.user_partitions:
                if user_partition.scheme in INCLUDE_SCHEMES:
                    group = get_group_for_user_partition(user_partition, course.id, user)
                    if group:
                        partition_groups.append(group)
            partition_group_ids = [unicode(partition_group.id) for partition_group in partition_groups]
            return partition_group_ids if partition_group_ids else None
        filter_dictionary = super(LmsSearchFilterGenerator, self).filter_dictionary(**kwargs)
        if 'user' in kwargs:
            user = kwargs['user']
            if 'course_id' in kwargs and kwargs['course_id']:
                try:
                    course_key = CourseKey.from_string(kwargs['course_id'])
                except InvalidKeyError:
                    course_key = SlashSeparatedCourseKey.from_deprecated_string(kwargs['course_id'])
                # Staff user looking at course as staff user
                if get_user_role(user, course_key) in ('instructor', 'staff'):
                    return filter_dictionary
                # Need to check that the course exists (if a course gets deleted, enrollments don't get cleaned up)
                course = modulestore().get_course(course_key)
                if course:
                    filter_dictionary['content_groups'] = get_group_ids_for_user(course, user)
            else:
                user_enrollments = self._enrollments_for_user(user)
                content_groups = []
                for enrollment in user_enrollments:
                    course = modulestore().get_course(enrollment.course_id)
                    if course:
                        enrollment_group_ids = get_group_ids_for_user(course, user)
                        if enrollment_group_ids:
                            content_groups.extend(enrollment_group_ids)
                filter_dictionary['content_groups'] = content_groups if content_groups else None
        return filter_dictionary
    def field_dictionary(self, **kwargs):
        """ add course if provided otherwise add courses in which the user is enrolled in """
        field_dictionary = super(LmsSearchFilterGenerator, self).field_dictionary(**kwargs)
        if not kwargs.get('user'):
            field_dictionary['course'] = []
        elif not kwargs.get('course_id'):
            user_enrollments = self._enrollments_for_user(kwargs['user'])
            field_dictionary['course'] = [unicode(enrollment.course_id) for enrollment in user_enrollments]
        # if we have an org filter, only include results for this org filter
        course_org_filter = microsite.get_value('course_org_filter')
        if course_org_filter:
            field_dictionary['org'] = course_org_filter
        return field_dictionary
    def exclude_dictionary(self, **kwargs):
        """ If we are not on a microsite, then exclude any microsites that are defined """
        exclude_dictionary = super(LmsSearchFilterGenerator, self).exclude_dictionary(**kwargs)
        course_org_filter = microsite.get_value('course_org_filter')
        # If we have a course filter we are ensuring that we only get those courses above
        if not course_org_filter:
            org_filter_out_set = microsite.get_all_orgs()
            if org_filter_out_set:
                exclude_dictionary['org'] = list(org_filter_out_set)
        return exclude_dictionary
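# Illustrative usage sketch (hypothetical user and course id; requires a configured
# LMS with the enrollment and modulestore apps available). The search app asks one
# generator instance for all three dictionaries:
#
#   generator = LmsSearchFilterGenerator()
#   filters = generator.filter_dictionary(user=user, course_id='org/course/run')
#   fields = generator.field_dictionary(user=user)
#   excludes = generator.exclude_dictionary()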
 | 
	agpl-3.0 | 7,445,365,950,426,380,000 | 45.561983 | 107 | 0.63241 | false | 
| 
	goliate/sarakha63-persomov | 
	couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twentyfourvideo.py | 
	32 | 
	3892 | 
	# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
    parse_iso8601,
    int_or_none,
)
class TwentyFourVideoIE(InfoExtractor):
    IE_NAME = '24video'
    _VALID_URL = r'https?://(?:www\.)?24video\.net/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.24video.net/video/view/1044982',
            'md5': '48dd7646775690a80447a8dca6a2df76',
            'info_dict': {
                'id': '1044982',
                'ext': 'mp4',
                'title': 'Эротика каменного века',
                'description': 'Как смотрели порно в каменном веке.',
                'thumbnail': 're:^https?://.*\.jpg$',
                'uploader': 'SUPERTELO',
                'duration': 31,
                'timestamp': 1275937857,
                'upload_date': '20100607',
                'age_limit': 18,
                'like_count': int,
                'dislike_count': int,
            },
        },
        {
            'url': 'http://www.24video.net/player/new24_play.swf?id=1044982',
            'only_matching': True,
        }
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.24video.net/video/view/%s' % video_id, video_id)
        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'<span itemprop="description">([^<]+)</span>', webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        duration = int_or_none(self._og_search_property(
            'duration', webpage, 'duration', fatal=False))
        timestamp = parse_iso8601(self._search_regex(
            r'<time id="video-timeago" datetime="([^"]+)" itemprop="uploadDate">',
            webpage, 'upload date'))
        uploader = self._html_search_regex(
            r'Загрузил\s*<a href="/jsecUser/movies/[^"]+" class="link">([^<]+)</a>',
            webpage, 'uploader', fatal=False)
        view_count = int_or_none(self._html_search_regex(
            r'<span class="video-views">(\d+) просмотр',
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._html_search_regex(
            r'<div class="comments-title" id="comments-count">(\d+) комментари',
            webpage, 'comment count', fatal=False))
        formats = []
        pc_video = self._download_xml(
            'http://www.24video.net/video/xml/%s?mode=play' % video_id,
            video_id, 'Downloading PC video URL').find('.//video')
        formats.append({
            'url': pc_video.attrib['url'],
            'format_id': 'pc',
            'quality': 1,
        })
        like_count = int_or_none(pc_video.get('ratingPlus'))
        dislike_count = int_or_none(pc_video.get('ratingMinus'))
        age_limit = 18 if pc_video.get('adult') == 'true' else 0
        mobile_video = self._download_xml(
            'http://www.24video.net/video/xml/%s' % video_id,
            video_id, 'Downloading mobile video URL').find('.//video')
        formats.append({
            'url': mobile_video.attrib['url'],
            'format_id': 'mobile',
            'quality': 0,
        })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'age_limit': age_limit,
            'formats': formats,
        }
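# Illustrative usage sketch (assumes a standalone youtube_dl installation rather
# than the copy vendored here; the URL is the first entry in _TESTS above):
if __name__ == '__main__':
    import youtube_dl
    ydl = youtube_dl.YoutubeDL({'skip_download': True})
    info = ydl.extract_info('http://www.24video.net/video/view/1044982', download=False)
    print(info.get('title'))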
 | 
	gpl-3.0 | 5,228,835,369,578,570,000 | 34.018349 | 114 | 0.517422 | false | 
| 
	longman694/youtube-dl | 
	youtube_dl/extractor/watchbox.py | 
	14 | 
	5539 | 
	# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    js_to_json,
    strip_or_none,
    try_get,
    unified_timestamp,
)
class WatchBoxIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?watchbox\.de/(?P<kind>serien|filme)/(?:[^/]+/)*[^/]+-(?P<id>\d+)'
    _TESTS = [{
        # film
        'url': 'https://www.watchbox.de/filme/free-jimmy-12325.html',
        'info_dict': {
            'id': '341368',
            'ext': 'mp4',
            'title': 'Free Jimmy',
            'description': 'md5:bcd8bafbbf9dc0ef98063d344d7cc5f6',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 4890,
            'age_limit': 16,
            'release_year': 2009,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        # episode
        'url': 'https://www.watchbox.de/serien/ugly-americans-12231/staffel-1/date-in-der-hoelle-328286.html',
        'info_dict': {
            'id': '328286',
            'ext': 'mp4',
            'title': 'S01 E01 - Date in der Hölle',
            'description': 'md5:2f31c74a8186899f33cb5114491dae2b',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1291,
            'age_limit': 12,
            'release_year': 2010,
            'series': 'Ugly Americans',
            'season_number': 1,
            'episode': 'Date in der Hölle',
            'episode_number': 1,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'https://www.watchbox.de/serien/ugly-americans-12231/staffel-2/der-ring-des-powers-328270',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        kind, video_id = mobj.group('kind', 'id')
        webpage = self._download_webpage(url, video_id)
        source = self._parse_json(
            self._search_regex(
                r'(?s)source\s*:\s*({.+?})\s*,\s*\n', webpage, 'source',
                default='{}'),
            video_id, transform_source=js_to_json, fatal=False) or {}
        video_id = compat_str(source.get('videoId') or video_id)
        devapi = self._download_json(
            'http://api.watchbox.de/devapi/id/%s' % video_id, video_id, query={
                'format': 'json',
                'apikey': 'hbbtv',
            }, fatal=False)
        item = try_get(devapi, lambda x: x['items'][0], dict) or {}
        title = item.get('title') or try_get(
            item, lambda x: x['movie']['headline_movie'],
            compat_str) or source['title']
        formats = []
        hls_url = item.get('media_videourl_hls') or source.get('hls')
        if hls_url:
            formats.extend(self._extract_m3u8_formats(
                hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        dash_url = item.get('media_videourl_wv') or source.get('dash')
        if dash_url:
            formats.extend(self._extract_mpd_formats(
                dash_url, video_id, mpd_id='dash', fatal=False))
        mp4_url = item.get('media_videourl')
        if mp4_url:
            formats.append({
                'url': mp4_url,
                'format_id': 'mp4',
                'width': int_or_none(item.get('width')),
                'height': int_or_none(item.get('height')),
                'tbr': int_or_none(item.get('bitrate')),
            })
        self._sort_formats(formats)
        description = strip_or_none(item.get('descr'))
        thumbnail = item.get('media_content_thumbnail_large') or source.get('poster') or item.get('media_thumbnail')
        duration = int_or_none(item.get('media_length') or source.get('length'))
        timestamp = unified_timestamp(item.get('pubDate'))
        view_count = int_or_none(item.get('media_views'))
        age_limit = int_or_none(try_get(item, lambda x: x['movie']['fsk']))
        release_year = int_or_none(try_get(item, lambda x: x['movie']['rel_year']))
        info = {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'age_limit': age_limit,
            'release_year': release_year,
            'formats': formats,
        }
        if kind.lower() == 'serien':
            series = try_get(
                item, lambda x: x['special']['title'],
                compat_str) or source.get('format')
            season_number = int_or_none(self._search_regex(
                r'^S(\d{1,2})\s*E\d{1,2}', title, 'season number',
                default=None) or self._search_regex(
                    r'/staffel-(\d+)/', url, 'season number', default=None))
            episode = source.get('title')
            episode_number = int_or_none(self._search_regex(
                r'^S\d{1,2}\s*E(\d{1,2})', title, 'episode number',
                default=None))
            info.update({
                'series': series,
                'season_number': season_number,
                'episode': episode,
                'episode_number': episode_number,
            })
        return info
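# Illustrative sketch (not part of the extractor): how the season/episode regexes
# above pull the numbers out of an episode title like the one in _TESTS.
if __name__ == '__main__':
    sample_title = 'S01 E01 - Date in der Hoelle'  # hypothetical sample value
    season = re.search(r'^S(\d{1,2})\s*E\d{1,2}', sample_title).group(1)
    episode = re.search(r'^S\d{1,2}\s*E(\d{1,2})', sample_title).group(1)
    print('season %s, episode %s' % (season, episode))  # -> season 01, episode 01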
 | 
	unlicense | 2,893,657,285,305,545,700 | 35.668874 | 116 | 0.506773 | false | 
| 
	mesocentrefc/easybuild-framework | 
	easybuild/tools/version.py | 
	2 | 
	2926 | 
	##
# Copyright 2009-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild.  If not, see <http://www.gnu.org/licenses/>.
##
"""
Module that takes control of versioning.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from distutils.version import LooseVersion
from socket import gethostname
# note: release candidates should be versioned as a pre-release, e.g. "1.1rc1"
# 1.1-rc1 would indicate a post-release, i.e., an update of 1.1, so beware!
VERSION = LooseVersion("1.14.0")
UNKNOWN = "UNKNOWN"
def get_git_revision():
    """
    Returns the git revision (e.g. aab4afc016b742c6d4b157427e192942d0e131fe),
    or UNKNOWN if getting the git revision fails
    relies on GitPython (see http://gitorious.org/git-python)
    """
    try:
        import git
    except ImportError:
        return UNKNOWN
    try:
        path = os.path.dirname(__file__)
        gitrepo = git.Git(path)
        return gitrepo.rev_list("HEAD").splitlines()[0]
    except git.GitCommandError:
        return UNKNOWN
git_rev = get_git_revision()
if git_rev == UNKNOWN:
    VERBOSE_VERSION = VERSION
else:
    VERBOSE_VERSION = LooseVersion("%s-r%s" % (VERSION, get_git_revision()))
# alias
FRAMEWORK_VERSION = VERBOSE_VERSION
# EasyBlock version
try:
    from easybuild.easyblocks import VERBOSE_VERSION as EASYBLOCKS_VERSION
except:
    EASYBLOCKS_VERSION = '0.0.UNKNOWN.EASYBLOCKS'  # make sure it is smaller than anything
def this_is_easybuild():
    """Standard starting message"""
    top_version = max(FRAMEWORK_VERSION, EASYBLOCKS_VERSION)
    # !!! bootstrap_eb.py script checks hard on the string below, so adjust with sufficient care !!!
    msg = "This is EasyBuild %s (framework: %s, easyblocks: %s) on host %s." \
         % (top_version, FRAMEWORK_VERSION, EASYBLOCKS_VERSION, gethostname())
    return msg
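# Illustrative sketch (not part of the module): when GitPython can read the working
# copy, VERBOSE_VERSION looks like "1.14.0-r<sha>"; otherwise it is just VERSION.
# The start-up banner simply interpolates those values.
if __name__ == '__main__':
    print(this_is_easybuild())
    # e.g. "This is EasyBuild 1.14.0 (framework: 1.14.0, easyblocks: 0.0.UNKNOWN.EASYBLOCKS) on host myhost."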
 | 
	gpl-2.0 | -6,282,021,567,314,024,000 | 34.253012 | 100 | 0.719754 | false | 
| 
	kinghaitao/git-core | 
	scripts/rt-tester/rt-tester.py | 
	904 | 
	5366 | 
	#!/usr/bin/env python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "lockbkl"       : "9",
    "unlockbkl"     : "10",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    intval = int(val)
    if top[0] == "M":
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
	if comments > 1:
	    progress(line)
	continue
    if comments == 1:
	comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
            progress("   " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
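# Illustrative test specification sketch (hypothetical scenario, not one of the
# kernel's bundled .tst files). Each line uses the four colon-separated fields
# parsed above -- "command: opcode: threadid: data". The sequence below switches
# thread 0 to SCHED_FIFO priority 80, takes and releases rt-mutex 0, and waits for
# the corresponding status changes:
#
#   C: schedfifo: 0: 80
#   C: lock:      0: 0
#   W: locked:    0: 0
#   C: unlock:    0: 0
#   W: unlocked:  0: 0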
 | 
	gpl-2.0 | -8,372,066,044,332,046,000 | 23.171171 | 70 | 0.493291 | false | 
| 
	broferek/ansible | 
	test/units/modules/network/f5/test_bigip_service_policy.py | 
	38 | 
	4128 | 
	# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
    from library.modules.bigip_service_policy import ApiParameters
    from library.modules.bigip_service_policy import ModuleParameters
    from library.modules.bigip_service_policy import ModuleManager
    from library.modules.bigip_service_policy import ArgumentSpec
    # In Ansible 2.8, Ansible changed import paths.
    from test.units.compat import unittest
    from test.units.compat.mock import Mock
    from test.units.compat.mock import patch
    from test.units.modules.utils import set_module_args
except ImportError:
    from ansible.modules.network.f5.bigip_service_policy import ApiParameters
    from ansible.modules.network.f5.bigip_service_policy import ModuleParameters
    from ansible.modules.network.f5.bigip_service_policy import ModuleManager
    from ansible.modules.network.f5.bigip_service_policy import ArgumentSpec
    # Ansible 2.8 imports
    from units.compat import unittest
    from units.compat.mock import Mock
    from units.compat.mock import patch
    from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except Exception:
        pass
    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    def test_module_parameters(self):
        args = dict(
            name='foo',
            description='my description',
            timer_policy='timer1',
            port_misuse_policy='misuse1',
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.description == 'my description'
        assert p.timer_policy == '/Common/timer1'
        assert p.port_misuse_policy == '/Common/misuse1'
    def test_api_parameters(self):
        args = load_fixture('load_net_service_policy_1.json')
        p = ApiParameters(params=args)
        assert p.name == 'baz'
        assert p.description == 'my description'
        assert p.timer_policy == '/Common/foo'
        assert p.port_misuse_policy == '/Common/bar'
class TestManager(unittest.TestCase):
    def setUp(self):
        self.spec = ArgumentSpec()
        try:
            self.p1 = patch('library.modules.bigip_service_policy.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_service_policy.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
    def test_create_selfip(self, *args):
        set_module_args(dict(
            name='foo',
            description='my description',
            timer_policy='timer1',
            port_misuse_policy='misuse1',
            partition='Common',
            state='present',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        mm.module_provisioned = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
 | 
	gpl-3.0 | -4,717,631,936,433,852,000 | 30.272727 | 97 | 0.644864 | false | 
| 
	wavelets/zipline | 
	zipline/examples/dual_ema_talib.py | 
	2 | 
	3230 | 
	#!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
from datetime import datetime
import pytz
class DualEMATaLib(TradingAlgorithm):
    """Dual Moving Average Crossover algorithm.
    This algorithm buys Apple (AAPL) once its short moving average crosses
    above its long moving average (indicating upwards momentum) and sells
    its shares once the averages cross back (indicating downwards
    momentum).
    """
    def initialize(self, short_window=20, long_window=40):
        # Add 2 mavg transforms, one with a long window, one
        # with a short window.
        self.short_ema_trans = EMA(timeperiod=short_window)
        self.long_ema_trans = EMA(timeperiod=long_window)
        # To keep track of whether we invested in the stock or not
        self.invested = False
    def handle_data(self, data):
        self.short_ema = self.short_ema_trans.handle_data(data)
        self.long_ema = self.long_ema_trans.handle_data(data)
        if self.short_ema is None or self.long_ema is None:
            return
        self.buy = False
        self.sell = False
        if (self.short_ema > self.long_ema).all() and not self.invested:
            self.order('AAPL', 100)
            self.invested = True
            self.buy = True
        elif (self.short_ema < self.long_ema).all() and self.invested:
            self.order('AAPL', -100)
            self.invested = False
            self.sell = True
        self.record(AAPL=data['AAPL'].price,
                    short_ema=self.short_ema['AAPL'],
                    long_ema=self.long_ema['AAPL'],
                    buy=self.buy,
                    sell=self.sell)
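# For reference, a minimal pure-Python sketch of the exponential moving average that
# the talib wrapper computes, using the standard smoothing factor
# alpha = 2 / (timeperiod + 1). This is an illustration only (TA-Lib seeds the
# series slightly differently), not the code path used by the transforms above.
def _reference_ema(prices, timeperiod):
    alpha = 2.0 / (timeperiod + 1)
    ema = prices[0]
    values = [ema]
    for price in prices[1:]:
        ema = alpha * price + (1.0 - alpha) * ema
        values.append(ema)
    return values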
if __name__ == '__main__':
    start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
    data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
                           end=end)
    dma = DualEMATaLib()
    results = dma.run(data).dropna()
    fig = plt.figure()
    ax1 = fig.add_subplot(211, ylabel='portfolio value')
    results.portfolio_value.plot(ax=ax1)
    ax2 = fig.add_subplot(212)
    results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
    ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
             '^', markersize=10, color='m')
    ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
             'v', markersize=10, color='k')
    plt.legend(loc=0)
    plt.gcf().set_size_inches(18, 8)
 | 
	apache-2.0 | -1,037,034,474,115,704,300 | 34.108696 | 77 | 0.644892 | false | 
| 
	crmccreary/openerp_server | 
	openerp/addons/account_bank_statement_extensions/account_bank_statement.py | 
	9 | 
	6553 | 
	# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#
#    Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import osv, fields
import decimal_precision as dp
import netsvc
from tools.translate import _
class account_bank_statement(osv.osv):
    _inherit = 'account.bank.statement'
    def write(self, cr, uid, ids, vals, context=None):
        if context is None:
            context = {}
        # bypass obsolete statement line resequencing
        if vals.get('line_ids', False) or context.get('ebanking_import', False):
            res = super(osv.osv, self).write(cr, uid, ids, vals, context=context)
        else: 
            res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
        return res
    def button_confirm_bank(self, cr, uid, ids, context=None):
        super(account_bank_statement, self).button_confirm_bank(cr, uid, ids, context=context)
        for st in self.browse(cr, uid, ids, context=context):
            cr.execute("UPDATE account_bank_statement_line  \
                SET state='confirm' WHERE id in %s ",
                (tuple([x.id for x in st.line_ids]),))
        return True
    def button_cancel(self, cr, uid, ids, context=None):
        super(account_bank_statement, self).button_cancel(cr, uid, ids, context=context)
        for st in self.browse(cr, uid, ids, context=context):
            if st.line_ids:
                cr.execute("UPDATE account_bank_statement_line  \
                    SET state='draft' WHERE id in %s ",
                    (tuple([x.id for x in st.line_ids]),))
        return True
account_bank_statement()
class account_bank_statement_line_global(osv.osv):
    _name = 'account.bank.statement.line.global'
    _description = 'Batch Payment Info'
    _columns = {
        'name': fields.char('Communication', size=128, required=True),
        'code': fields.char('Code', size=64, required=True),
        'parent_id': fields.many2one('account.bank.statement.line.global', 'Parent Code', ondelete='cascade'),
        'child_ids': fields.one2many('account.bank.statement.line.global', 'parent_id', 'Child Codes'),
        'type': fields.selection([
            ('iso20022', 'ISO 20022'),
            ('coda', 'CODA'),
            ('manual', 'Manual'), 
            ], 'Type', required=True),
        'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
        'bank_statement_line_ids': fields.one2many('account.bank.statement.line', 'globalisation_id', 'Bank Statement Lines'),
    }
    _rec_name = 'code'
    _defaults = {
        'code': lambda s,c,u,ctx={}: s.pool.get('ir.sequence').get(c, u, 'account.bank.statement.line.global'),
        'name': '/',
    }
    _sql_constraints = [
        ('code_uniq', 'unique (code)', 'The code must be unique !'),
    ]
    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        if not args:
            args = []
        ids = []
        if name:
            ids = self.search(cr, user, [('code', 'ilike', name)] + args, limit=limit)
            if not ids:
                ids = self.search(cr, user, [('name', operator, name)] + args, limit=limit)
            if not ids and len(name.split()) >= 2:
                #Separating code and name for searching
                operand1, operand2 = name.split(' ', 1) #name can contain spaces
                ids = self.search(cr, user, [('code', 'like', operand1), ('name', operator, operand2)] + args, limit=limit)
        else:
            ids = self.search(cr, user, args, context=context, limit=limit)
        return self.name_get(cr, user, ids, context=context)
account_bank_statement_line_global()
class account_bank_statement_line(osv.osv):
    _inherit = 'account.bank.statement.line'
    _columns = {
        'date': fields.date('Entry Date', required=True, states={'confirm': [('readonly', True)]}),
        'val_date': fields.date('Valuta Date', states={'confirm': [('readonly', True)]}),
        'globalisation_id': fields.many2one('account.bank.statement.line.global', 'Globalisation ID',
            states={'confirm': [('readonly', True)]}, 
            help="Code to identify transactions belonging to the same globalisation level within a batch payment"),
        'globalisation_amount': fields.related('globalisation_id', 'amount', type='float',
            relation='account.bank.statement.line.global', string='Glob. Amount', readonly=True),
        'journal_id': fields.related('statement_id', 'journal_id', type='many2one', relation='account.journal', string='Journal', store=True, readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('confirm', 'Confirmed')],
            'State', required=True, readonly=True),    
        'counterparty_name': fields.char('Counterparty Name', size=35),
        'counterparty_bic': fields.char('Counterparty BIC', size=11),
        'counterparty_number': fields.char('Counterparty Number', size=34),
        'counterparty_currency': fields.char('Counterparty Currency', size=3),
    }
    _defaults = {
        'state': 'draft',
    }
    def unlink(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        if context.get('block_statement_line_delete', False):
            raise osv.except_osv(_('Warning'), _('Delete operation not allowed ! \
            Please go to the associated bank statement in order to delete and/or modify this bank statement line'))
        return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)
account_bank_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | 8,069,233,019,505,464,000 | 46.839416 | 157 | 0.610255 | false | 
| 
	ProfessionalIT/maxigenios-website | 
	sdk/google_appengine/lib/django-1.2/django/db/models/sql/datastructures.py | 
	396 | 
	1157 | 
	"""
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
    pass
class FullResultSet(Exception):
    pass
class MultiJoin(Exception):
    """
    Used by join construction code to indicate the point at which a
    multi-valued join was attempted (if the caller wants to treat that
    exceptionally).
    """
    def __init__(self, level):
        self.level = level
class Empty(object):
    pass
class RawValue(object):
    def __init__(self, value):
        self.value = value
class Date(object):
    """
    Add a date selection column.
    """
    def __init__(self, col, lookup_type):
        self.col = col
        self.lookup_type = lookup_type
    def relabel_aliases(self, change_map):
        c = self.col
        if isinstance(c, (list, tuple)):
            self.col = (change_map.get(c[0], c[0]), c[1])
    def as_sql(self, qn, connection):
        if isinstance(self.col, (list, tuple)):
            col = '%s.%s' % tuple([qn(c) for c in self.col])
        else:
            col = self.col
        return connection.ops.date_trunc_sql(self.lookup_type, col)
 | 
	mit | 2,795,459,522,885,896,000 | 24.152174 | 76 | 0.607606 | false | 
| 
	wuga214/Django-Wuga | 
	env/lib/python2.7/site-packages/pip/_vendor/distlib/markers.py | 
	1261 | 
	6282 | 
	# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
    """
    A limited evaluator for Python expressions.
    """
    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }
    allowed_values = {
        'sys_platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.version is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os_name': os.name,
        'platform_in_venv': str(in_venv()),
        'platform_release': platform.release(),
        'platform_version': platform.version(),
        'platform_machine': platform.machine(),
        'platform_python_implementation': python_implementation(),
    }
    def __init__(self, context=None):
        """
        Initialise an instance.
        :param context: If specified, names are looked up in this mapping.
        """
        self.context = context or {}
        self.source = None
    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s
    def get_handler(self, node_type):
        """
        Get a handler for the specified AST node type.
        """
        return getattr(self, 'do_%s' % node_type, None)
    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)
    def get_attr_key(self, node):
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)
    def do_attribute(self, node):
        if not isinstance(node.value, ast.Name):
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result
    def do_boolop(self, node):
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result
    def do_compare(self, node):
        def sanity_check(lhsnode, rhsnode):
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            #elif (isinstance(lhsnode, ast.Attribute)
            #      and isinstance(rhsnode, ast.Attribute)):
            #    klhs = self.get_attr_key(lhsnode)
            #    krhs = self.get_attr_key(rhsnode)
            #    valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)
        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result
    def do_expression(self, node):
        return self.evaluate(node.body)
    def do_name(self, node):
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result
    def do_str(self, node):
        return node.s
def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.
    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    return Evaluator(execution_context).evaluate(marker.strip())
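# Illustrative usage sketch (hypothetical markers; the results depend on the running
# interpreter and platform):
if __name__ == '__main__':
    print(interpret("python_version >= '2.6'"))
    print(interpret("os_name == 'posix' and python_version >= '2.6'"))
    print(interpret("extra == 'test'", {'extra': 'test'}))  # name resolved from the context mapping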
 | 
	apache-2.0 | 4,225,784,778,292,974,600 | 32.063158 | 77 | 0.552053 | false | 
| 
	paulgclark/rf_utilities | 
	zmq_utils.py | 
	1 | 
	1239 | 
	import zmq
import array
import time
import struct
import numpy as np
import pmt
import sys
class zmq_pull_socket():
    def __init__(self, tcp_str, verbose=0):
        self.context = zmq.Context()
        self.receiver = self.context.socket(zmq.PULL)
        self.receiver.connect(tcp_str)
    def poll(self, type_str='f', verbose=0):
        raw_data = self.receiver.recv()
        a = array.array(type_str, raw_data)
        return a
    
    def poll_message(self):
        msg = self.receiver.recv()
        # this is a binary string, convert it to a list of ints
        byte_list = []
        for byte in msg:
            byte_list.append(ord(byte))
        return byte_list
    # incomplete attempt to optimize data flow by
    # sending bytes instead of floats; flowgraph
    # changes needed to support this, as well
    # as all downstream code reworked to use
    # bytes
    def poll_short(self, type_str='h', verbose=0):
        raw_data = self.receiver.recv()
        a = array.array(type_str, raw_data)
        npa_s = np.asarray(a)
        npa_f = npa_s.astype(float)
        npa_f *= (1.0/10000.0)
        #fmt = "<%dI" % (len(raw_data) //4)
        #a = list(struct.unpack(fmt, raw_data))
        return list(npa_f)
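# Illustrative usage sketch (hypothetical endpoint; assumes something with a ZMQ PUSH
# socket, e.g. a GNU Radio flowgraph, is publishing float samples on this port):
if __name__ == '__main__':
    sock = zmq_pull_socket('tcp://127.0.0.1:5555')
    samples = sock.poll()  # blocks until one message arrives
    print('received %d floats' % len(samples))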
 | 
	mit | 2,149,555,878,925,307,400 | 27.813953 | 63 | 0.596449 | false | 
| 
	kcpawan/django | 
	tests/many_to_one/models.py | 
	215 | 
	2785 | 
	"""
Many-to-one relationships
To define a many-to-one relationship, use ``ForeignKey()``.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()
    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    reporter = models.ForeignKey(Reporter, models.CASCADE)
    def __str__(self):
        return self.headline
    class Meta:
        ordering = ('headline',)
# If ticket #1578 ever slips back in, these models will not be able to be
# created (the field names being lower-cased versions of their opposite
# classes is important here).
class First(models.Model):
    second = models.IntegerField()
class Second(models.Model):
    first = models.ForeignKey(First, models.CASCADE, related_name='the_first')
# Protect against repetition of #1839, #2415 and #2536.
class Third(models.Model):
    name = models.CharField(max_length=20)
    third = models.ForeignKey('self', models.SET_NULL, null=True, related_name='child_set')
class Parent(models.Model):
    name = models.CharField(max_length=20, unique=True)
    bestchild = models.ForeignKey('Child', models.SET_NULL, null=True, related_name='favored_by')
class Child(models.Model):
    name = models.CharField(max_length=20)
    parent = models.ForeignKey(Parent, models.CASCADE)
class ToFieldChild(models.Model):
    parent = models.ForeignKey(Parent, models.CASCADE, to_field='name')
# Multiple paths to the same model (#7110, #7125)
@python_2_unicode_compatible
class Category(models.Model):
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Record(models.Model):
    category = models.ForeignKey(Category, models.CASCADE)
@python_2_unicode_compatible
class Relation(models.Model):
    left = models.ForeignKey(Record, models.CASCADE, related_name='left_set')
    right = models.ForeignKey(Record, models.CASCADE, related_name='right_set')
    def __str__(self):
        return "%s - %s" % (self.left.category.name, self.right.category.name)
# Test related objects visibility.
class SchoolManager(models.Manager):
    def get_queryset(self):
        return super(SchoolManager, self).get_queryset().filter(is_public=True)
class School(models.Model):
    is_public = models.BooleanField(default=False)
    objects = SchoolManager()
class Student(models.Model):
    school = models.ForeignKey(School, models.CASCADE)
 | 
	bsd-3-clause | -1,773,659,522,397,237,800 | 26.85 | 97 | 0.712029 | false | 
| 
	xguse/ggplot | 
	ggplot/geoms/geom_abline.py | 
	12 | 
	1487 | 
	from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import numpy as np
from ggplot.utils import make_iterable_ntimes
from .geom import geom
# Note (for future documentation): slope and intercept can be functions
# that compute the slope and intercept using the data. If that is the case
# then the x and y aesthetics must be mapped.
class geom_abline(geom):
    DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
                   'alpha': None, 'size': 1.0, 'x': None,
                   'y': None}
    REQUIRED_AES = {'slope', 'intercept'}
    DEFAULT_PARAMS = {'stat': 'abline', 'position': 'identity'}
    _aes_renames = {'linetype': 'linestyle', 'size': 'linewidth'}
    def _plot_unit(self, pinfo, ax):
        slope = pinfo['slope']
        intercept = pinfo['intercept']
        n = len(slope)
        linewidth = make_iterable_ntimes(pinfo['linewidth'], n)
        linestyle = make_iterable_ntimes(pinfo['linestyle'], n)
        alpha = make_iterable_ntimes(pinfo['alpha'], n)
        color = make_iterable_ntimes(pinfo['color'], n)
        ax.set_autoscale_on(False)
        xlim = ax.get_xlim()
        _x = np.array([np.min(xlim), np.max(xlim)])
        for i in range(len(slope)):
            _y = _x * slope[i] + intercept[i]
            ax.plot(_x, _y,
                    linewidth=linewidth[i],
                    linestyle=linestyle[i],
                    alpha=alpha[i],
                    color=color[i])
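# Illustrative sketch (not part of the original module): hypothetical usage,
# assuming the surrounding ggplot API and its bundled mtcars dataset.
#   from ggplot import ggplot, aes, geom_point, mtcars
#   p = ggplot(aes(x='wt', y='mpg'), data=mtcars) + geom_point() + \
#       geom_abline(intercept=37.3, slope=-5.3)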
 | 
	bsd-2-clause | -1,214,483,387,473,522,000 | 34.404762 | 66 | 0.569603 | false | 
| 
	Vegasvikk/django-cms | 
	cms/models/static_placeholder.py | 
	49 | 
	3452 | 
	import uuid
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
def static_slotname(instance):
    """
    Returns a string to be used as the slot
    for the static placeholder field.
    """
    return instance.code
@python_2_unicode_compatible
class StaticPlaceholder(models.Model):
    CREATION_BY_TEMPLATE = 'template'
    CREATION_BY_CODE = 'code'
    CREATION_METHODS = (
        (CREATION_BY_TEMPLATE, _('by template')),
        (CREATION_BY_CODE, _('by code')),
    )
    name = models.CharField(
        verbose_name=_(u'static placeholder name'), max_length=255, blank=True, default='',
        help_text=_(u'Descriptive name to identify this static placeholder. Not displayed to users.'))
    code = models.CharField(
        verbose_name=_(u'placeholder code'), max_length=255, blank=True,
        help_text=_(u'To render the static placeholder in templates.'))
    draft = PlaceholderField(static_slotname, verbose_name=_(u'placeholder content'), related_name='static_draft')
    public = PlaceholderField(static_slotname, editable=False, related_name='static_public')
    dirty = models.BooleanField(default=False, editable=False)
    creation_method = models.CharField(
        verbose_name=_('creation_method'), choices=CREATION_METHODS,
        default=CREATION_BY_CODE, max_length=20, blank=True,
    )
    site = models.ForeignKey(Site, null=True, blank=True)
    class Meta:
        verbose_name = _(u'static placeholder')
        verbose_name_plural = _(u'static placeholders')
        app_label = 'cms'
        unique_together = (('code', 'site'),)
    def __str__(self):
        return self.name
    def clean(self):
        # TODO: check for clashes if the random code is already taken
        if not self.code:
            self.code = u'static-%s' % uuid.uuid4()
        if not self.site:
            placeholders = StaticPlaceholder.objects.filter(code=self.code, site__isnull=True)
            if self.pk:
                placeholders = placeholders.exclude(pk=self.pk)
            if placeholders.exists():
                raise ValidationError(_("A static placeholder with the same site and code already exists"))
    def publish(self, request, language, force=False):
        if force or self.has_publish_permission(request):
            self.public.clear(language=language)
            plugins = self.draft.get_plugins_list(language=language)
            copy_plugins_to(plugins, self.public, no_signals=True)
            self.dirty = False
            self.save()
            return True
        return False
    def has_change_permission(self, request):
        if request.user.is_superuser:
            return True
        opts = self._meta
        return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
    def has_publish_permission(self, request):
        if request.user.is_superuser:
            return True
        opts = self._meta
        return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and \
               request.user.has_perm(opts.app_label + '.' + 'publish_page')
 | 
	bsd-3-clause | 5,500,780,088,044,019,000 | 39.139535 | 114 | 0.659328 | false | 
| 
	home-assistant/home-assistant | 
	tests/components/hisense_aehw4a1/test_init.py | 
	5 | 
	2927 | 
	"""Tests for the Hisense AEH-W4A1 init file."""
from unittest.mock import patch
from pyaehw4a1 import exceptions
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import hisense_aehw4a1
from homeassistant.setup import async_setup_component
async def test_creating_entry_sets_up_climate_discovery(hass):
    """Test setting up Hisense AEH-W4A1 loads the climate component."""
    with patch(
        "homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.discovery",
        return_value=["1.2.3.4"],
    ), patch(
        "homeassistant.components.hisense_aehw4a1.climate.async_setup_entry",
        return_value=True,
    ) as mock_setup:
        result = await hass.config_entries.flow.async_init(
            hisense_aehw4a1.DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        # Confirmation form
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
async def test_configuring_hisense_w4a1_create_entry(hass):
    """Test that specifying config will create an entry."""
    with patch(
        "homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.check",
        return_value=True,
    ), patch(
        "homeassistant.components.hisense_aehw4a1.async_setup_entry",
        return_value=True,
    ) as mock_setup:
        await async_setup_component(
            hass,
            hisense_aehw4a1.DOMAIN,
            {"hisense_aehw4a1": {"ip_address": ["1.2.3.4"]}},
        )
        await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
async def test_configuring_hisense_w4a1_not_creates_entry_for_device_not_found(hass):
    """Test that specifying config will not create an entry."""
    with patch(
        "homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.check",
        side_effect=exceptions.ConnectionError,
    ), patch(
        "homeassistant.components.hisense_aehw4a1.async_setup_entry",
        return_value=True,
    ) as mock_setup:
        await async_setup_component(
            hass,
            hisense_aehw4a1.DOMAIN,
            {"hisense_aehw4a1": {"ip_address": ["1.2.3.4"]}},
        )
        await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 0
async def test_configuring_hisense_w4a1_not_creates_entry_for_empty_import(hass):
    """Test that specifying config will not create an entry."""
    with patch(
        "homeassistant.components.hisense_aehw4a1.async_setup_entry",
        return_value=True,
    ) as mock_setup:
        await async_setup_component(hass, hisense_aehw4a1.DOMAIN, {})
        await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 0
 | 
	apache-2.0 | 3,011,705,839,678,666,000 | 34.695122 | 86 | 0.665186 | false | 
| 
	ChanChiChoi/scikit-learn | 
	examples/model_selection/plot_roc.py | 
	146 | 
	3697 | 
	"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
    See also :func:`sklearn.metrics.roc_auc_score`,
             :ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]))
for i in range(n_classes):
    plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
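# Illustrative addition (not part of the original example): the micro-averaged
# ROC area computed above can also be obtained directly from roc_auc_score
# on the binarized labels.
from sklearn.metrics import roc_auc_score
print("micro-average ROC AUC via roc_auc_score: %0.2f"
      % roc_auc_score(y_test, y_score, average="micro"))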
 | 
	bsd-3-clause | 7,034,067,062,988,640,000 | 34.548077 | 79 | 0.675412 | false | 
| 
	difro/kubernetes | 
	examples/cluster-dns/images/backend/server.py | 
	468 | 
	1313 | 
	#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
  # Handler for the GET requests
  def do_GET(self):
    self.send_response(200)
    self.send_header('Content-type','text/html')
    self.end_headers()
    self.wfile.write("Hello World!")
try:
  # Create a web server and define the handler to manage the incoming request.
  server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
  print 'Started httpserver on port ' , PORT_NUMBER
  server.serve_forever()
except KeyboardInterrupt:
  print '^C received, shutting down the web server'
  server.socket.close()
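# Illustrative note (not part of the original example): with the server running,
# any GET request returns the greeting, e.g.
#   $ curl http://localhost:8000/
#   Hello World!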
 | 
	apache-2.0 | 6,005,757,674,417,908,000 | 34.486486 | 78 | 0.752475 | false | 
| 
	vipulkanade/EventbriteDjango | 
	lib/python2.7/site-packages/django/middleware/clickjacking.py | 
	284 | 
	1989 | 
	"""
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
    """
    Middleware that sets the X-Frame-Options HTTP header in HTTP responses.
    Does not set the header if it's already set or if the response contains
    a xframe_options_exempt value set to True.
    By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
    response can only be loaded on a frame within the same site. To prevent the
    response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
    your project's Django settings to 'DENY'.
    Note: older browsers will quietly ignore this header, thus other
    clickjacking protection techniques should be used if protection in those
    browsers is required.
    https://en.wikipedia.org/wiki/Clickjacking#Server_and_client
    """
    def process_response(self, request, response):
        # Don't set it if it's already in the response
        if response.get('X-Frame-Options') is not None:
            return response
        # Don't set it if they used @xframe_options_exempt
        if getattr(response, 'xframe_options_exempt', False):
            return response
        response['X-Frame-Options'] = self.get_xframe_options_value(request,
                                                                    response)
        return response
    def get_xframe_options_value(self, request, response):
        """
        Gets the value to set for the X_FRAME_OPTIONS header.
        By default this uses the value from the X_FRAME_OPTIONS Django
        settings. If not found in settings, defaults to 'SAMEORIGIN'.
        This method can be overridden if needed, allowing it to vary based on
        the request or response.
        """
        return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
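# Illustrative sketch (not part of the original module): a hypothetical settings
# fragment and view exercising the two knobs described in the docstring above.
#
#   # settings.py
#   MIDDLEWARE_CLASSES = ['django.middleware.clickjacking.XFrameOptionsMiddleware']
#   X_FRAME_OPTIONS = 'DENY'
#
#   # views.py
#   from django.views.decorators.clickjacking import xframe_options_exempt
#
#   @xframe_options_exempt
#   def embeddable_view(request):
#       ...  # response is served without the X-Frame-Options header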
 | 
	mit | 6,088,704,932,504,675,000 | 37.25 | 79 | 0.677728 | false | 
| 
	rcolasanti/CompaniesHouseScraper | 
	DVLACompanyNmeMatchCoHoAPIFindMissing.py | 
	1 | 
	5174 | 
	
import requests
import json
import numpy as np
import pandas as pd
import CoHouseToken
from difflib import SequenceMatcher
# In[3]:
def exactMatch(line1, line2):
    line1=line1.upper().rstrip()    
    line2=line2.upper().rstrip()
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def aStopWord(word):
    return word.upper().replace("COMPANY","CO").replace("LIMITED","LTD").replace("&","AND").rstrip() 
def spaces(word):
    w = word.upper().replace("/"," ")
    w = w.replace("."," ").replace(","," ").replace("-"," ").rstrip() 
    return w
def removeAStopWord(word):
    w = word.upper().replace("LTD"," ").replace("CO"," ").replace("AND"," ").replace("("," ").replace("/"," ")
    w = w.replace(")"," ").replace("."," ").replace(","," ").replace("-"," ").rstrip() 
    return w
def removeABlank(word):
    w = word.replace(" ","")
    return w
def removeABracket (line):
    flag = False
    word=""
    for a in line:
        if a=="(":
            flag = True
            a=""
        if a==")":
            a=""
            flag = False
        if flag:
            a=""
        word+=a
    return word
    
def stopWord(line1, line2):
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def removeStopWord(line1, line2):
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def removeBlanks(line1, line2):
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
    return line1==line2
def removeBrackets(line1, line2):
    line1=removeABracket(line1)  
    line2=removeABracket(line2)
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
   #print("|"+line1+"|"+line2+"|",line1==line2)
    
    return line1==line2
def strip(line1, line2):
    line1=removeABracket(line1)  
    line2=removeABracket(line2)
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
    
    return line1,line2
def match(company,results):
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(exactMatch(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(stopWord(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeStopWord(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeBlanks(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeBrackets(company,line)):
            return True,line,number
        
        #old_match(company,results)
    return False,"",""
def main(args):
    print(args[0])
    search_url ="https://api.companieshouse.gov.uk/search/companies?q="
    token = CoHouseToken.getToken()
    pw = ''
    base_url = 'https://api.companieshouse.gov.uk'
    file = args[1]
    print(file)
    df = pd.read_csv(file,names=['Organisation'])
    companies = df.Organisation
    count=0
    found = open("found2.csv",'w')
    missing = open("missing2.csv",'w')
    for c in companies:
        c =c.upper().replace("&","AND")
        c = c.split(" T/A ")[0]
        c = c.split("WAS ")[0]
        c= spaces(c)
        url=search_url+c
        results = json.loads(requests.get(url, auth=(token,pw)).text)
        for i , key  in enumerate(results['items']):
            a,b = strip(c, key['title'])
            r = SequenceMatcher(None, a, b).ratio()
            print("%s \t %s\t %.2f \t %s \t %s"%(i,c,r,key['company_number'],key['title']))
        
        v = input('type number or return to reject: ')
        if v =="":
            print("reject")
            missing.write("%s\n"%(c))
        else:
            key = results['items'][int(v)]
            # recompute r for the selected entry (it otherwise holds the last candidate's ratio)
            r = SequenceMatcher(None, *strip(c, key['title'])).ratio()
            print("%s \t %s\t %.2f \t %s \t %s"%(v,c,r,key['company_number'],key['title']))
            print("*************************")
            found.write("%s,%s,%s,\n"%(c,key['title'],key['company_number']))
        
            
    print()
    #print(count/len(companies))
    return 0
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
 | 
	gpl-3.0 | -9,107,299,095,821,690,000 | 20.380165 | 110 | 0.55547 | false | 
| 
	lyft/incubator-airflow | 
	airflow/kubernetes/pod_generator.py | 
	2 | 
	22063 | 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization needs to be written.
"""
import copy
import inspect
import os
import uuid
from functools import reduce
from typing import Dict, List, Optional, Union
import kubernetes.client.models as k8s
import yaml
from kubernetes.client.api_client import ApiClient
from airflow.exceptions import AirflowConfigException
from airflow.version import version as airflow_version
MAX_POD_ID_LEN = 253
class PodDefaults:
    """
    Static defaults for Pods
    """
    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
    VOLUME_MOUNT = k8s.V1VolumeMount(
        name='xcom',
        mount_path=XCOM_MOUNT_PATH
    )
    VOLUME = k8s.V1Volume(
        name='xcom',
        empty_dir=k8s.V1EmptyDirVolumeSource()
    )
    SIDECAR_CONTAINER = k8s.V1Container(
        name=SIDECAR_CONTAINER_NAME,
        command=['sh', '-c', XCOM_CMD],
        image='alpine',
        volume_mounts=[VOLUME_MOUNT],
        resources=k8s.V1ResourceRequirements(
            requests={
                "cpu": "1m",
            }
        ),
    )
class PodGenerator:
    """
    Contains Kubernetes Airflow Worker configuration logic
    Represents a kubernetes pod and manages execution of a single pod.
    Any configuration that is container specific gets applied to
    the first container in the list of containers.
    :param image: The docker image
    :type image: Optional[str]
    :param name: name in the metadata section (not the container name)
    :type name: Optional[str]
    :param namespace: pod namespace
    :type namespace: Optional[str]
    :param volume_mounts: list of kubernetes volumes mounts
    :type volume_mounts: Optional[List[Union[k8s.V1VolumeMount, dict]]]
    :param envs: A dict containing the environment variables
    :type envs: Optional[Dict[str, str]]
    :param cmds: The command to be run on the first container
    :type cmds: Optional[List[str]]
    :param args: The arguments to be run on the pod
    :type args: Optional[List[str]]
    :param labels: labels for the pod metadata
    :type labels: Optional[Dict[str, str]]
    :param node_selectors: node selectors for the pod
    :type node_selectors: Optional[Dict[str, str]]
    :param ports: list of ports. Applies to the first container.
    :type ports: Optional[List[Union[k8s.V1ContainerPort, dict]]]
    :param volumes: Volumes to be attached to the first container
    :type volumes: Optional[List[Union[k8s.V1Volume, dict]]]
    :param image_pull_policy: Specify a policy to cache or always pull an image
    :type image_pull_policy: str
    :param restart_policy: The restart policy of the pod
    :type restart_policy: str
    :param image_pull_secrets: Any image pull secrets to be given to the pod.
        If more than one secret is required, provide a comma separated list:
        secret_a,secret_b
    :type image_pull_secrets: str
    :param init_containers: A list of init containers
    :type init_containers: Optional[List[k8s.V1Container]]
    :param service_account_name: Identity for processes that run in a Pod
    :type service_account_name: Optional[str]
    :param resources: Resource requirements for the first containers
    :type resources: Optional[Union[k8s.V1ResourceRequirements, dict]]
    :param annotations: annotations for the pod
    :type annotations: Optional[Dict[str, str]]
    :param affinity: A dict containing a group of affinity scheduling rules
    :type affinity: Optional[dict]
    :param hostnetwork: If True enable host networking on the pod
    :type hostnetwork: bool
    :param tolerations: A list of kubernetes tolerations
    :type tolerations: Optional[list]
    :param security_context: A dict containing the security context for the pod
    :type security_context: Optional[Union[k8s.V1PodSecurityContext, dict]]
    :param configmaps: Any configmap refs to envfrom.
        If more than one configmap is required, provide a comma separated list
        configmap_a,configmap_b
    :type configmaps: List[str]
    :param dnspolicy: Specify a dnspolicy for the pod
    :type dnspolicy: Optional[str]
    :param schedulername: Specify a schedulername for the pod
    :type schedulername: Optional[str]
    :param pod: The fully specified pod. Mutually exclusive with `path_or_string`
    :type pod: Optional[kubernetes.client.models.V1Pod]
    :param pod_template_file: Path to YAML file. Mutually exclusive with `pod`
    :type pod_template_file: Optional[str]
    :param extract_xcom: Whether to bring up a container for xcom
    :type extract_xcom: bool
    """
    def __init__(  # pylint: disable=too-many-arguments,too-many-locals
        self,
        image: Optional[str] = None,
        name: Optional[str] = None,
        namespace: Optional[str] = None,
        volume_mounts: Optional[List[Union[k8s.V1VolumeMount, dict]]] = None,
        envs: Optional[Dict[str, str]] = None,
        cmds: Optional[List[str]] = None,
        args: Optional[List[str]] = None,
        labels: Optional[Dict[str, str]] = None,
        node_selectors: Optional[Dict[str, str]] = None,
        ports: Optional[List[Union[k8s.V1ContainerPort, dict]]] = None,
        volumes: Optional[List[Union[k8s.V1Volume, dict]]] = None,
        image_pull_policy: Optional[str] = None,
        restart_policy: Optional[str] = None,
        image_pull_secrets: Optional[str] = None,
        init_containers: Optional[List[k8s.V1Container]] = None,
        service_account_name: Optional[str] = None,
        resources: Optional[Union[k8s.V1ResourceRequirements, dict]] = None,
        annotations: Optional[Dict[str, str]] = None,
        affinity: Optional[dict] = None,
        hostnetwork: bool = False,
        tolerations: Optional[list] = None,
        security_context: Optional[Union[k8s.V1PodSecurityContext, dict]] = None,
        configmaps: Optional[List[str]] = None,
        dnspolicy: Optional[str] = None,
        schedulername: Optional[str] = None,
        pod: Optional[k8s.V1Pod] = None,
        pod_template_file: Optional[str] = None,
        extract_xcom: bool = False,
    ):
        self.validate_pod_generator_args(locals())
        if pod_template_file:
            self.ud_pod = self.deserialize_model_file(pod_template_file)
        else:
            self.ud_pod = pod
        self.pod = k8s.V1Pod()
        self.pod.api_version = 'v1'
        self.pod.kind = 'Pod'
        # Pod Metadata
        self.metadata = k8s.V1ObjectMeta()
        self.metadata.labels = labels
        self.metadata.name = name
        self.metadata.namespace = namespace
        self.metadata.annotations = annotations
        # Pod Container
        self.container = k8s.V1Container(name='base')
        self.container.image = image
        self.container.env = []
        if envs:
            if isinstance(envs, dict):
                for key, val in envs.items():
                    self.container.env.append(k8s.V1EnvVar(
                        name=key,
                        value=val
                    ))
            elif isinstance(envs, list):
                self.container.env.extend(envs)
        configmaps = configmaps or []
        self.container.env_from = []
        for configmap in configmaps:
            self.container.env_from.append(k8s.V1EnvFromSource(
                config_map_ref=k8s.V1ConfigMapEnvSource(
                    name=configmap
                )
            ))
        self.container.command = cmds or []
        self.container.args = args or []
        self.container.image_pull_policy = image_pull_policy
        self.container.ports = ports or []
        self.container.resources = resources
        self.container.volume_mounts = volume_mounts or []
        # Pod Spec
        self.spec = k8s.V1PodSpec(containers=[])
        self.spec.security_context = security_context
        self.spec.tolerations = tolerations
        self.spec.dns_policy = dnspolicy
        self.spec.scheduler_name = schedulername
        self.spec.host_network = hostnetwork
        self.spec.affinity = affinity
        self.spec.service_account_name = service_account_name
        self.spec.init_containers = init_containers
        self.spec.volumes = volumes or []
        self.spec.node_selector = node_selectors
        self.spec.restart_policy = restart_policy
        self.spec.image_pull_secrets = []
        if image_pull_secrets:
            for image_pull_secret in image_pull_secrets.split(','):
                self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(
                    name=image_pull_secret
                ))
        # Attach sidecar
        self.extract_xcom = extract_xcom
    def gen_pod(self) -> k8s.V1Pod:
        """Generates pod"""
        result = self.ud_pod
        if result is None:
            result = self.pod
            result.spec = self.spec
            result.metadata = self.metadata
            result.spec.containers = [self.container]
        result.metadata.name = self.make_unique_pod_id(result.metadata.name)
        if self.extract_xcom:
            result = self.add_sidecar(result)
        return result
    @staticmethod
    def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
        """Adds sidecar"""
        pod_cp = copy.deepcopy(pod)
        pod_cp.spec.volumes = pod.spec.volumes or []
        pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
        pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
        pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
        pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
        return pod_cp
    @staticmethod
    def from_obj(obj) -> Optional[k8s.V1Pod]:
        """Converts to pod from obj"""
        if obj is None:
            return None
        if isinstance(obj, PodGenerator):
            return obj.gen_pod()
        if not isinstance(obj, dict):
            raise TypeError(
                'Cannot convert a non-dictionary or non-PodGenerator '
                'object into a KubernetesExecutorConfig')
        # We do not want to extract the constant from ExecutorLoader here because it is
        # just a name in a dictionary rather than an executor selection mechanism, and
        # importing it would cause a cyclic import.
        namespaced = obj.get("KubernetesExecutor", {})
        if not namespaced:
            return None
        resources = namespaced.get('resources')
        if resources is None:
            requests = {
                'cpu': namespaced.get('request_cpu'),
                'memory': namespaced.get('request_memory')
            }
            limits = {
                'cpu': namespaced.get('limit_cpu'),
                'memory': namespaced.get('limit_memory')
            }
            all_resources = list(requests.values()) + list(limits.values())
            if all(r is None for r in all_resources):
                resources = None
            else:
                resources = k8s.V1ResourceRequirements(
                    requests=requests,
                    limits=limits
                )
        namespaced['resources'] = resources
        return PodGenerator(**namespaced).gen_pod()
    @staticmethod
    def reconcile_pods(base_pod: k8s.V1Pod, client_pod: Optional[k8s.V1Pod]) -> k8s.V1Pod:
        """
        :param base_pod: has the base attributes which are overwritten if they exist
            in the client pod and remain if they do not exist in the client_pod
        :type base_pod: k8s.V1Pod
        :param client_pod: the pod that the client wants to create.
        :type client_pod: k8s.V1Pod
        :return: the merged pods
        This can't be done recursively as certain fields are overwritten, and some are concatenated.
        """
        if client_pod is None:
            return base_pod
        client_pod_cp = copy.deepcopy(client_pod)
        client_pod_cp.spec = PodGenerator.reconcile_specs(base_pod.spec, client_pod_cp.spec)
        client_pod_cp.metadata = merge_objects(base_pod.metadata, client_pod_cp.metadata)
        client_pod_cp = merge_objects(base_pod, client_pod_cp)
        return client_pod_cp
    @staticmethod
    def reconcile_specs(base_spec: Optional[k8s.V1PodSpec],
                        client_spec: Optional[k8s.V1PodSpec]) -> Optional[k8s.V1PodSpec]:
        """
        :param base_spec: has the base attributes which are overwritten if they exist
            in the client_spec and remain if they do not exist in the client_spec
        :type base_spec: k8s.V1PodSpec
        :param client_spec: the spec that the client wants to create.
        :type client_spec: k8s.V1PodSpec
        :return: the merged specs
        """
        if base_spec and not client_spec:
            return base_spec
        if not base_spec and client_spec:
            return client_spec
        elif client_spec and base_spec:
            client_spec.containers = PodGenerator.reconcile_containers(
                base_spec.containers, client_spec.containers
            )
            merged_spec = extend_object_field(base_spec, client_spec, 'volumes')
            return merge_objects(base_spec, merged_spec)
        return None
    @staticmethod
    def reconcile_containers(base_containers: List[k8s.V1Container],
                             client_containers: List[k8s.V1Container]) -> List[k8s.V1Container]:
        """
        :param base_containers: has the base attributes which are overwritten if they exist
            in the client_containers and remain if they do not exist in the client_containers
        :type base_containers: List[k8s.V1Container]
        :param client_containers: the containers that the client wants to create.
        :type client_containers: List[k8s.V1Container]
        :return: the merged containers
        This runs recursively over the list of containers.
        """
        if not base_containers:
            return client_containers
        if not client_containers:
            return base_containers
        client_container = client_containers[0]
        base_container = base_containers[0]
        client_container = extend_object_field(base_container, client_container, 'volume_mounts')
        client_container = extend_object_field(base_container, client_container, 'env')
        client_container = extend_object_field(base_container, client_container, 'env_from')
        client_container = extend_object_field(base_container, client_container, 'ports')
        client_container = extend_object_field(base_container, client_container, 'volume_devices')
        client_container = merge_objects(base_container, client_container)
        return [client_container] + PodGenerator.reconcile_containers(
            base_containers[1:], client_containers[1:]
        )
    @staticmethod
    def construct_pod(
        dag_id: str,
        task_id: str,
        pod_id: str,
        try_number: int,
        date: str,
        command: List[str],
        kube_executor_config: Optional[k8s.V1Pod],
        worker_config: k8s.V1Pod,
        namespace: str,
        worker_uuid: str
    ) -> k8s.V1Pod:
        """
        Construct a pod by gathering and consolidating the configuration from 3 places:
            - airflow.cfg
            - executor_config
            - dynamic arguments
        """
        dynamic_pod = PodGenerator(
            namespace=namespace,
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': date,
                'try_number': str(try_number),
                'airflow_version': airflow_version.replace('+', '-'),
                'kubernetes_executor': 'True',
            },
            cmds=command,
            name=pod_id
        ).gen_pod()
        # Reconcile the pods starting with the first chronologically,
        # Pod from the airflow.cfg -> Pod from executor_config arg -> Pod from the K8s executor
        pod_list = [worker_config, kube_executor_config, dynamic_pod]
        return reduce(PodGenerator.reconcile_pods, pod_list)
    @staticmethod
    def deserialize_model_file(path: str) -> k8s.V1Pod:
        """
        :param path: Path to the file
        :return: a kubernetes.client.models.V1Pod
        Unfortunately we need access to the private method
        ``_ApiClient__deserialize_model`` from the kubernetes client.
        This issue is tracked here: https://github.com/kubernetes-client/python/issues/977.
        """
        api_client = ApiClient()
        if os.path.exists(path):
            with open(path) as stream:
                pod = yaml.safe_load(stream)
        else:
            pod = yaml.safe_load(path)
        # pylint: disable=protected-access
        return api_client._ApiClient__deserialize_model(pod, k8s.V1Pod)
    @staticmethod
    def make_unique_pod_id(dag_id):
        """
        Kubernetes pod names must be <= 253 chars and must pass the following regex for
        validation
        ``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
        :param dag_id: a dag_id with only alphanumeric characters
        :return: ``str`` valid Pod name of appropriate length
        """
        if not dag_id:
            return None
        safe_uuid = uuid.uuid4().hex
        safe_pod_id = dag_id[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
        return safe_pod_id
    @staticmethod
    def validate_pod_generator_args(given_args):
        """
        :param given_args: The arguments passed to the PodGenerator constructor.
        :type given_args: dict
        :return: None
        Validate that if `pod` or `pod_template_file` is set, the user is not attempting
        to configure the pod with the other arguments.
        """
        pod_args = list(inspect.signature(PodGenerator).parameters.items())
        def predicate(k, v):
            """
            :param k: an arg to PodGenerator
            :type k: string
            :param v: the parameter of the given arg
            :type v: inspect.Parameter
            :return: bool
            returns True if the PodGenerator argument has no default value
            or its default is None, and it is not one of the fields listed
            in `non_empty_fields`.
            """
            non_empty_fields = {
                'pod', 'pod_template_file', 'extract_xcom', 'service_account_name', 'image_pull_policy',
                'restart_policy'
            }
            return (v.default is None or v.default is v.empty) and k not in non_empty_fields
        args_without_defaults = {k: given_args[k] for k, v in pod_args if predicate(k, v) and given_args[k]}
        if given_args['pod'] and given_args['pod_template_file']:
            raise AirflowConfigException("Cannot pass both `pod` and `pod_template_file` arguments")
        if args_without_defaults and (given_args['pod'] or given_args['pod_template_file']):
            raise AirflowConfigException(
                "Cannot configure pod and pass either `pod` or `pod_template_file`. Fields {} passed.".format(
                    list(args_without_defaults.keys())
                )
            )
def merge_objects(base_obj, client_obj):
    """
    :param base_obj: has the base attributes which are overwritten if they exist
        in the client_obj and remain if they do not exist in the client_obj
    :param client_obj: the object that the client wants to create.
    :return: the merged objects
    """
    if not base_obj:
        return client_obj
    if not client_obj:
        return base_obj
    client_obj_cp = copy.deepcopy(client_obj)
    for base_key in base_obj.to_dict().keys():
        base_val = getattr(base_obj, base_key, None)
        if not getattr(client_obj, base_key, None) and base_val:
            setattr(client_obj_cp, base_key, base_val)
    return client_obj_cp
def extend_object_field(base_obj, client_obj, field_name):
    """
    :param base_obj: an object which has a property `field_name` that is a list
    :param client_obj: an object which has a property `field_name` that is a list.
        A copy of this object is returned with `field_name` modified
    :param field_name: the name of the list field
    :type field_name: str
    :return: the client_obj with the property `field_name` set to the concatenation of the two lists
    """
    client_obj_cp = copy.deepcopy(client_obj)
    base_obj_field = getattr(base_obj, field_name, None)
    client_obj_field = getattr(client_obj, field_name, None)
    if (not isinstance(base_obj_field, list) and base_obj_field is not None) or \
       (not isinstance(client_obj_field, list) and client_obj_field is not None):
        raise ValueError("The chosen field must be a list.")
    if not base_obj_field:
        return client_obj_cp
    if not client_obj_field:
        setattr(client_obj_cp, field_name, base_obj_field)
        return client_obj_cp
    appended_fields = base_obj_field + client_obj_field
    setattr(client_obj_cp, field_name, appended_fields)
    return client_obj_cp
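# Illustrative sketch (not part of the original module): a minimal, hypothetical
# construction showing how keyword arguments flow into the generated V1Pod.
def _example_pod() -> k8s.V1Pod:
    return PodGenerator(
        image='apache/airflow:example',   # hypothetical image tag
        name='example-worker',
        namespace='default',
        envs={'EXAMPLE_ENV': '1'},
        labels={'app': 'example-worker'},
        cmds=['airflow'],
        args=['version'],
    ).gen_pod()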
 | 
	apache-2.0 | -4,377,916,697,781,541,400 | 38.398214 | 110 | 0.632235 | false | 
| 
	rochoa85/pyMIC | 
	examples/data_map/data_map.py | 
	1 | 
	1674 | 
	#!/usr/bin/python
# Copyright (c) 2014, Intel Corporation All rights reserved. 
# 
# Redistribution and use in source and binary forms, with or without 
# modification, are permitted provided that the following conditions are 
# met: 
# 
# 1. Redistributions of source code must retain the above copyright 
# notice, this list of conditions and the following disclaimer. 
#
# 2. Redistributions in binary form must reproduce the above copyright 
# notice, this list of conditions and the following disclaimer in the 
# documentation and/or other materials provided with the distribution. 
#
# 3. Neither the name of the copyright holder nor the names of its 
# contributors may be used to endorse or promote products derived from 
# this software without specific prior written permission. 
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import pyMIC as mic
import numpy as np
device = mic.devices[0]
a = device.zeros((8,8))
 | 
	bsd-3-clause | -9,068,146,142,489,116,000 | 43.052632 | 75 | 0.771207 | false | 
| 
	javierTerry/odoo | 
	addons/email_template/html2text.py | 
	440 | 
	14143 | 
	#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "2.36"
__author__ = "Aaron Swartz ([email protected])"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
#   Support decoded entities with unifiable.
if not hasattr(__builtins__, 'True'): True, False = 1, 0
import re, sys, urllib, htmlentitydefs, codecs
import sgmllib
import urlparse
sgmllib.charref = re.compile('&#([xX]?[0-9a-fA-F]+)[^0-9a-fA-F]')
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False
### Entity Nonsense ###
def name2cp(k):
    if k == 'apos': return ord("'")
    if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    else:
        k = htmlentitydefs.entitydefs[k]
        if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
        return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u'}
unifiable_n = {}
for k in unifiable.keys():
    unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
    if name[0] in ['x','X']:
        c = int(name[1:], 16)
    else:
        c = int(name)
    if not UNICODE_SNOB and c in unifiable_n.keys():
        return unifiable_n[c]
    else:
        return unichr(c)
def entityref(c):
    if not UNICODE_SNOB and c in unifiable.keys():
        return unifiable[c]
    else:
        try: name2cp(c)
        except KeyError: return "&" + c
        else: return unichr(name2cp(c))
def replaceEntities(s):
    s = s.group(1)
    if s[0] == "#":
        return charref(s[1:])
    else: return entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
    return r_unescape.sub(replaceEntities, s)
def fixattrs(attrs):
    # Fix bug in sgmllib.py
    if not attrs: return attrs
    newattrs = []
    for attr in attrs:
        newattrs.append((attr[0], unescape(attr[1])))
    return newattrs
### End Entity Nonsense ###
def onlywhite(line):
    """Return true if the line does only consist of whitespace characters."""
    for c in line:
        if c is not ' ' and c is not '  ':
            return c is ' '
    return line
def optwrap(text):
    """Wrap all paragraphs in the provided text."""
    if not BODY_WIDTH:
        return text
    assert wrap, "Requires Python 2.3."
    result = ''
    newlines = 0
    for para in text.split("\n"):
        if len(para) > 0:
            if para[0] is not ' ' and para[0] is not '-' and para[0] is not '*':
                for line in wrap(para, BODY_WIDTH):
                    result += line + "\n"
                result += "\n"
                newlines = 2
            else:
                if not onlywhite(para):
                    result += para + "\n"
                    newlines = 1
        else:
            if newlines < 2:
                result += "\n"
                newlines += 1
    return result
def hn(tag):
    if tag[0] == 'h' and len(tag) == 2:
        try:
            n = int(tag[1])
            if n in range(1, 10): return n
        except ValueError: return 0
class _html2text(sgmllib.SGMLParser):
    def __init__(self, out=sys.stdout.write, baseurl=''):
        sgmllib.SGMLParser.__init__(self)
        if out is None: self.out = self.outtextf
        else: self.out = out
        self.outtext = u''
        self.quiet = 0
        self.p_p = 0
        self.outcount = 0
        self.start = 1
        self.space = 0
        self.a = []
        self.astack = []
        self.acount = 0
        self.list = []
        self.blockquote = 0
        self.pre = 0
        self.startpre = 0
        self.lastWasNL = 0
        self.abbr_title = None # current abbreviation definition
        self.abbr_data = None # last inner HTML (for abbr being defined)
        self.abbr_list = {} # stack of abbreviations to write later
        self.baseurl = baseurl
    def outtextf(self, s):
        self.outtext += s
    def close(self):
        sgmllib.SGMLParser.close(self)
        self.pbr()
        self.o('', 0, 'end')
        return self.outtext
    def handle_charref(self, c):
        self.o(charref(c))
    def handle_entityref(self, c):
        self.o(entityref(c))
    def unknown_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, 1)
    def unknown_endtag(self, tag):
        self.handle_tag(tag, None, 0)
    def previousIndex(self, attrs):
        """ returns the index of certain set of attributes (of a link) in the
            self.a list
            If the set of attributes is not found, returns None
        """
        if not attrs.has_key('href'): return None
        i = -1
        for a in self.a:
            i += 1
            match = 0
            if a.has_key('href') and a['href'] == attrs['href']:
                if a.has_key('title') or attrs.has_key('title'):
                        if (a.has_key('title') and attrs.has_key('title') and
                            a['title'] == attrs['title']):
                            match = True
                else:
                    match = True
            if match: return i
    def handle_tag(self, tag, attrs, start):
        attrs = fixattrs(attrs)
        if hn(tag):
            self.p()
            if start: self.o(hn(tag)*"#" + ' ')
        if tag in ['p', 'div']: self.p()
        if tag == "br" and start: self.o("  \n")
        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()
        if tag in ["head", "style", 'script']:
            if start: self.quiet += 1
            else: self.quiet -= 1
        if tag in ["body"]:
            self.quiet = 0 # sites like 9rules.com never close <head>
        if tag == "blockquote":
            if start:
                self.p(); self.o('> ', 0, 1); self.start = 1
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()
        if tag in ['em', 'i', 'u']: self.o("_")
        if tag in ['strong', 'b']: self.o("**")
        if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
        if tag == "abbr":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD
                self.abbr_title = None
                self.abbr_data = ''
                if attrs.has_key('title'):
                    self.abbr_title = attrs['title']
            else:
                if self.abbr_title != None:
                    self.abbr_list[self.abbr_data] = self.abbr_title
                    self.abbr_title = None
                self.abbr_data = ''
        if tag == "a":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD
                if attrs.has_key('href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
                    self.astack.append(attrs)
                    self.o("[")
                else:
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if a:
                        i = self.previousIndex(a)
                        if i is not None:
                            a = self.a[i]
                        else:
                            self.acount += 1
                            a['count'] = self.acount
                            a['outcount'] = self.outcount
                            self.a.append(a)
                        self.o("][" + `a['count']` + "]")
        if tag == "img" and start:
            attrsD = {}
            for (x, y) in attrs: attrsD[x] = y
            attrs = attrsD
            if attrs.has_key('src'):
                attrs['href'] = attrs['src']
                alt = attrs.get('alt', '')
                i = self.previousIndex(attrs)
                if i is not None:
                    attrs = self.a[i]
                else:
                    self.acount += 1
                    attrs['count'] = self.acount
                    attrs['outcount'] = self.outcount
                    self.a.append(attrs)
                self.o("![")
                self.o(alt)
                self.o("]["+`attrs['count']`+"]")
        if tag == 'dl' and start: self.p()
        if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
        if tag == 'dd' and not start: self.pbr()
        if tag in ["ol", "ul"]:
            if start:
                self.list.append({'name':tag, 'num':0})
            else:
                if self.list: self.list.pop()
            self.p()
        if tag == 'li':
            if start:
                self.pbr()
                if self.list: li = self.list[-1]
                else: li = {'name':'ul', 'num':0}
                self.o("  "*len(self.list)) #TODO: line up <ol><li>s > 9 correctly.
                if li['name'] == "ul": self.o("* ")
                elif li['name'] == "ol":
                    li['num'] += 1
                    self.o(`li['num']`+". ")
                self.start = 1
            else:
                self.pbr()
        if tag in ["table", "tr"] and start: self.p()
        if tag == 'td': self.pbr()
        if tag == "pre":
            if start:
                self.startpre = 1
                self.pre = 1
            else:
                self.pre = 0
            self.p()
    def pbr(self):
        if self.p_p == 0: self.p_p = 1
    def p(self):
        self.p_p = 2
    def o(self, data, puredata=0, force=0):
        if self.abbr_data is not None: self.abbr_data += data
        if not self.quiet:
            if puredata and not self.pre:
                data = re.sub('\s+', ' ', data)
                if data and data[0] == ' ':
                    self.space = 1
                    data = data[1:]
            if not data and not force: return
            if self.startpre:
                #self.out(" :") #TODO: not output when already one there
                self.startpre = 0
            bq = (">" * self.blockquote)
            if not (force and data and data[0] == ">") and self.blockquote: bq += " "
            if self.pre:
                bq += "    "
                data = data.replace("\n", "\n"+bq)
            if self.start:
                self.space = 0
                self.p_p = 0
                self.start = 0
            if force == 'end':
                # It's the end.
                self.p_p = 0
                self.out("\n")
                self.space = 0
            if self.p_p:
                self.out(('\n'+bq)*self.p_p)
                self.space = 0
            if self.space:
                if not self.lastWasNL: self.out(' ')
                self.space = 0
            if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
                if force == "end": self.out("\n")
                newa = []
                for link in self.a:
                    if self.outcount > link['outcount']:
                        self.out("   ["+`link['count']`+"]: " + urlparse.urljoin(self.baseurl, link['href']))
                        if link.has_key('title'): self.out(" ("+link['title']+")")
                        self.out("\n")
                    else:
                        newa.append(link)
                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
                self.a = newa
            if self.abbr_list and force == "end":
                for abbr, definition in self.abbr_list.items():
                    self.out("  *[" + abbr + "]: " + definition + "\n")
            self.p_p = 0
            self.out(data)
            self.lastWasNL = data and data[-1] == '\n'
            self.outcount += 1
    def handle_data(self, data):
        if r'\/script>' in data: self.quiet -= 1
        self.o(data, 1)
    def unknown_decl(self, data):
        pass
def wrapwrite(text): sys.stdout.write(text.encode('utf8'))
def html2text_file(html, out=wrapwrite, baseurl=''):
    h = _html2text(out, baseurl)
    h.feed(html)
    h.feed("")
    return h.close()
def html2text(html, baseurl=''):
    return optwrap(html2text_file(html, None, baseurl))
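# Hedged usage sketch (not part of the original script): the helpers above can
# also be called as a library; the sample markup below is illustrative only.
#
#     md = html2text(u"<p>Hello, <a href='http://example.com/'>world</a>!</p>")
#     wrapwrite(md)   # writes the Markdown-style result to stdout as UTF-8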
if __name__ == "__main__":
    baseurl = ''
    if sys.argv[1:]:
        arg = sys.argv[1]
        if arg.startswith('http://'):
            baseurl = arg
            j = urllib.urlopen(baseurl)
            try:
                from feedparser import _getCharacterEncoding as enc
            except ImportError:
                enc = lambda x, y: ('utf-8', 1)
            text = j.read()
            encoding = enc(j.headers, text)[0]
            if encoding == 'us-ascii': encoding = 'utf-8'
            data = text.decode(encoding)
        else:
            encoding = 'utf8'
            if len(sys.argv) > 2:
                encoding = sys.argv[2]
            f = open(arg, 'r')
            try:
                data = f.read().decode(encoding)
            finally:
                f.close()
    else:
        data = sys.stdin.read().decode('utf8')
    wrapwrite(html2text(data, baseurl))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | -7,231,034,888,977,510,000 | 29.812636 | 109 | 0.466096 | false | 
| 
	saguila/AlgoritmoA | 
	pqdict.py | 
	2 | 
	16450 | 
	"""Copyright (c) 2012 Nezar Abdennur
This module contains code adapted from the Python implementation of the heapq
module, which was written by Kevin O'Connor and augmented by Tim Peters and
Raymond Hettinger.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Priority Queue Dictionary -- An indexed priority queue data structure.
Stores a set of prioritized hashable elements. Can be used as an updatable
schedule.
The priority queue is implemented as a binary heap, which supports:         
    - O(1) access to the top priority element        
    - O(log n) removal of the top priority element     
    - O(log n) insertion of a new element
In addition, an internal dictionary or "index" maps elements to their position
in the heap array. This index is kept up-to-date when the heap is manipulated.
As a result, PQD also supports:          
    - O(1) lookup of an arbitrary element's priority key     
    - O(log n) removal of an arbitrary element          
    - O(log n) updating of an arbitrary element's priority key
The standard heap operations used internally (here, called "sink" and "swim")
are based on the code in the python heapq module.* These operations are extended
to preserve correctness of the internal dictionary.
* The names of the methods in heapq (sift up/down) seem to refer to the motion
of the items being compared to, rather than the item being operated on as is
normally done in textbooks (i.e. bubble down/up, instead). I stuck to the
textbook convention, but using the sink/swim nomenclature from Sedgewick et al:
the way I see it, an item that is too "heavy" (low-priority) should sink down
the tree, while one that is too "light" should float or swim up. Note, however,
that the sink implementation is non-conventional. See heapq for details about
why.
""" 
__author__ = ('Nezar Abdennur', '[email protected]')  
__all__ = ['PQDict', 'PQDictEntry', 'heapsorted_by_value']
from collections import Mapping, MutableMapping
from abc import ABCMeta, abstractmethod
class PQDictEntry(object):
    __metaclass__ = ABCMeta
    def __init__(self, dkey, pkey):
        self.dkey = dkey
        self.pkey = pkey
    @abstractmethod
    def __lt__(self, other):
        return NotImplemented
    # def set_pkey(self, pkey):
    #     pass
    def __eq__(self, other):
        return self.pkey == other.pkey
    def __repr__(self):
        return self.__class__.__name__ + \
            "(%s: %s)" % (repr(self.dkey), self.pkey)
class MinPQDEntry(PQDictEntry):
    __init__ = PQDictEntry.__init__
    __eq__ = PQDictEntry.__eq__
    def __lt__(self, other):
        return self.pkey < other.pkey
class MaxPQDEntry(PQDictEntry):
    __init__ = PQDictEntry.__init__
    __eq__ = PQDictEntry.__eq__
    def __lt__(self, other):
        return self.pkey > other.pkey
class PQDict(MutableMapping):
    """
    Maps dictionary keys (keys) to priority keys (values). Maintains an
    internal heap so that the highest priority item can always be obtained in
    constant time. The mapping is mutable so items may be added, removed and
    have their priorities updated.
    """
    # Implementation details:
    #   - heap (list): stores (dkey,pkey)-pairs as "entries" (PQDEntry objects).
    #   - nodefinder (dict): maps each dkey to the position of its entry in the 
    #     heap
    #   - the < comparator is used to rank entries
    __slots__ = ('nodefinder', 'heap', 'create_entry')
    create_entry = MinPQDEntry
    __eq__ = MutableMapping.__eq__
    __ne__ = MutableMapping.__ne__
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    get = MutableMapping.get
    clear = MutableMapping.clear
    update = MutableMapping.update
    setdefault = MutableMapping.setdefault
    #fromkeys
    def __init__(self, *args, **kwargs):
        """
        Mimics the standard dict constructor:
            Accepts a sequence/iterator of (dkey, pkey) pairs.
            Accepts named arguments or an unpacked dictionary.
        Also accepts a single mapping object to convert it to a pqdict.
        The default priority ranking for entries is in decreasing pkey value
        (i.e., a min-pq: LOWER pkey values have a HIGHER rank). This is typical
        for a scheduler, where the higher ranked tasks have earlier times.
        """
        if len(args) > 1:
            raise TypeError
        self.heap = []
        self.nodefinder = {}
        pos = 0
        if args:
            if isinstance(args[0], Mapping):
                seq = args[0].items()
            else:
                seq = args[0]
            try:
                for dkey, pkey in seq:
                    entry = self.create_entry(dkey, pkey)
                    self.heap.append(entry)
                    self.nodefinder[dkey] = pos
                    pos += 1
            except TypeError:
                raise ValueError
        if kwargs:
            for dkey, pkey in kwargs.items():
                entry = self.create_entry(dkey, pkey)
                self.heap.append(entry)
                self.nodefinder[dkey] = pos
                pos += 1
        self._heapify()
    @classmethod
    def minpq(cls, *args, **kwargs):
        pq = cls()
        pq.create_entry = MinPQDEntry
        pq.__init__(*args, **kwargs)
        return pq
    @classmethod
    def maxpq(cls, *args, **kwargs):
        pq = cls()
        pq.create_entry = MaxPQDEntry
        pq.__init__(*args, **kwargs)
        return pq
    @classmethod
    def custompq(cls, entrytype, *args, **kwargs):
        pq = cls()
        if issubclass(entrytype, PQDictEntry):
            pq.create_entry = entrytype
        else:
            raise TypeError('Custom entry class must be a subclass of' \
                            'PQDictEntry')
        pq.__init__(*args, **kwargs)
        return pq
    @classmethod
    def fromfunction(cls, iterable, pkeygen): #instead of fromkeys
        """
        Provide a key function that determines priorities by which to heapify
        the elements of an iterable into a PQD.
        """
        return cls( (dkey, pkeygen(dkey)) for dkey in iterable )
    def __len__(self):
        """
        Return number of items in the PQD.
        """
        return len(self.nodefinder)
    def __contains__(self, dkey):
        """
        Return True if dkey is in the PQD else return False.
        """
        return dkey in self.nodefinder
    def __iter__(self):
        """
        Return an iterator over the dictionary keys of the PQD. The order 
        of iteration is undefined! Use iterkeys() to iterate over dictionary 
        keys sorted by priority.
        """
        for entry in self.heap:
            yield entry.dkey
    def __getitem__(self, dkey):
        """
        Return the priority of dkey. Raises a KeyError if not in the PQD.
        """
        return self.heap[self.nodefinder[dkey]].pkey #raises KeyError
    def __setitem__(self, dkey, pkey):
        """
        Assign priority to dictionary key.
        """
        heap = self.heap
        finder = self.nodefinder
        try:
            pos = finder[dkey]
        except KeyError:
            # add new entry
            n = len(self.heap)
            self.heap.append(self.create_entry(dkey, pkey))
            self.nodefinder[dkey] = n
            self._swim(n)
        else:
            # update existing entry
            heap[pos].pkey = pkey
            parent_pos = (pos - 1) >> 1
            child_pos = 2*pos + 1
            if parent_pos > -1 and heap[pos] < heap[parent_pos]:
                self._swim(pos)
            elif child_pos < len(heap):
                right_pos = child_pos + 1
                if (right_pos < len(heap) 
                        and not heap[child_pos] < heap[right_pos]):
                    child_pos = right_pos
                if heap[child_pos] < heap[pos]:
                    self._sink(pos)
    def __delitem__(self, dkey):
        """
        Remove item. Raises a KeyError if dkey is not in the PQD.
        """
        heap = self.heap
        finder = self.nodefinder
        # Remove very last item and place in vacant spot. Let the new item
        # sink until it reaches its new resting place.
        try:
            pos = finder.pop(dkey)
        except KeyError:
            raise
        else:
            entry = heap[pos]
            last = heap.pop(-1)
            if entry is not last:
                heap[pos] = last
                finder[last.dkey] = pos
                parent_pos = (pos - 1) >> 1
                child_pos = 2*pos + 1
                if parent_pos > -1 and heap[pos] < heap[parent_pos]:
                    self._swim(pos)
                elif child_pos < len(heap):
                    right_pos = child_pos + 1
                    if (right_pos < len(heap) 
                            and not heap[child_pos] < heap[right_pos]):
                        child_pos = right_pos
                    if heap[child_pos] < heap[pos]:
                        self._sink(pos)
            del entry
    def __copy__(self):
        """
        Return a new PQD with the same dkeys associated with the same priority
        keys.
        """
        # We want the two PQDs to behave as different schedules on the same
        # set of dkeys. As a result:
        #   - The new heap list contains copies of all entries because PQDEntry
        #     objects are mutable and should not be shared by two PQDicts.
        #   - The new nodefinder dict (dkey->heap positions) must be a copy of 
        #     the old nodefinder dict since it maps the same dkeys to positions 
        #     in a different list.
        from copy import copy
        other = self.__class__()
        other.heap = [copy(entry) for entry in self.heap]
        other.nodefinder = copy(self.nodefinder)
        return other
    copy = __copy__
    def __repr__(self):
        things = ', '.join(['%s: %s' % (repr(entry.dkey), entry.pkey) 
                                for entry in self.heap])
        return self.__class__.__name__ + '({' + things  + '})'
    __marker = object()
    def pop(self, dkey, default=__marker):
        """
        If dkey is in the PQD, remove it and return its priority key, else 
        return default. If default is not given and dkey is not in the PQD, a 
        KeyError is raised.
        """
        heap = self.heap
        finder = self.nodefinder
        try:
            pos = finder.pop(dkey)
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            delentry = heap[pos]
            last = heap.pop(-1)
            if delentry is not last:
                heap[pos] = last
                finder[last.dkey] = pos
                parent_pos = (pos - 1) >> 1
                child_pos = 2*pos + 1
                if parent_pos > -1 and heap[pos] < heap[parent_pos]:
                    self._swim(pos)
                elif child_pos < len(heap):
                    right_pos = child_pos + 1
                    if (right_pos < len(heap) 
                            and not heap[child_pos] < heap[right_pos]):
                        child_pos = right_pos
                    if heap[child_pos] < heap[pos]:
                        self._sink(pos)
            pkey = delentry.pkey
            del delentry
            return pkey
    def popitem(self):
        """
        Extract top priority item. Raises KeyError if PQD is empty.
        """
        try:
            last = self.heap.pop(-1)
        except IndexError:
            raise KeyError
        else:
            if self.heap:
                entry = self.heap[0]
                self.heap[0] = last
                self.nodefinder[last.dkey] = 0
                self._sink(0)
            else:
                entry = last
            self.nodefinder.pop(entry.dkey)
            return entry.dkey, entry.pkey
    def additem(self, dkey, pkey):
        """
        Add a new item. Raises KeyError if item is already in the PQD.
        """
        if dkey in self.nodefinder:
            raise KeyError
        self[dkey] = pkey
    def updateitem(self, dkey, new_pkey):
        """
        Update the priority key of an existing item. Raises KeyError if item is
        not in the PQD.
        """
        if dkey not in self.nodefinder:
            raise KeyError
        self[dkey] = new_pkey
    def peek(self):
        """
        Get top priority item.
        """
        try:
            entry = self.heap[0]
        except IndexError:
            raise KeyError
        return entry.dkey, entry.pkey
    def iterkeys(self):
        """
        Destructive heapsort iterator over dictionary keys, ordered by priority
        key.
        """
        try:
            while True:
                yield self.popitem()[0]
        except KeyError:
            return
    def itervalues(self):
        """
        Destructive heapsort iterator over priority keys.
        """
        try:
            while True:
                yield self.popitem()[1]
        except KeyError:
            return
    def iteritems(self):
        """
        Destructive heapsort iterator over items, ordered by priority key.
        """
        try:
            while True:
                yield self.popitem()
        except KeyError:
            return
    def _heapify(self):
        n = len(self.heap)
        for pos in reversed(range(n//2)):
            self._sink(pos)
    def _sink(self, top=0):
        heap = self.heap
        finder = self.nodefinder
        # Peel off top item
        pos = top
        entry = heap[pos]
        # Sift up a trail of child nodes
        child_pos = 2*pos + 1
        while child_pos < len(heap):
            # Choose the index of smaller child.
            right_pos = child_pos + 1
            if right_pos < len(heap) and not heap[child_pos] < heap[right_pos]:
                child_pos = right_pos
            # Move the smaller child up.
            child_entry = heap[child_pos]
            heap[pos] = child_entry
            finder[child_entry.dkey] = pos
            pos = child_pos
            child_pos = 2*pos + 1
        # We are now at a leaf. Put item there and let it swim until it reaches
        # its new resting place.
        heap[pos] = entry
        finder[entry.dkey] = pos
        self._swim(pos, top)
    def _swim(self, pos, top=0):
        heap = self.heap
        finder = self.nodefinder
        # Remove item from its place
        entry = heap[pos]
        # Bubble item up by sifting parents down until finding a place it fits.
        while pos > top:
            parent_pos = (pos - 1) >> 1
            parent_entry = heap[parent_pos]
            if entry < parent_entry:
                heap[pos] = parent_entry
                finder[parent_entry.dkey] = pos
                pos = parent_pos
                continue
            break
        # Put item in its new place
        heap[pos] = entry
        finder[entry.dkey] = pos
    
def heapsorted_by_value(mapping, maxheap=False):
    """
    Takes an arbitrary mapping and, treating the values as priority keys, sorts
    its items by priority via heapsort using a PQDict.
    Returns:
        a list of the dictionary items sorted by value
    """
    if maxheap:
        pq = PQDict.maxpq(mapping)
    else:
        pq = PQDict(mapping)
    return [item for item in pq.iteritems()] | 
	mit | -5,154,796,241,812,267,000 | 31.193738 | 80 | 0.561641 | false | 
| 
	ashwinikd/xhprof-dataextract | 
	lib/sql.py | 
	1 | 
	1165 | 
	DROP_TBL_PERFDATA = "DROP TABLE perfdata"
CREATE_TBL_PERFDATA = """CREATE TABLE `perfdata` (
  `pk` int(11) NOT NULL AUTO_INCREMENT,
  `id` varchar(16) NOT NULL,
  `fn` varchar(256) NOT NULL,
  `wt` int(11) NOT NULL,
  `ct` int(11) NOT NULL,
  `pmu` int(11) NOT NULL,
  `mu` int(11) NOT NULL,
  `cpu` int(11) NOT NULL,
  `rec_on` datetime NOT NULL,
  PRIMARY KEY (`pk`)
)"""
CREATE_TBL_PARENT_CHILD = """CREATE TABLE IF NOT EXISTS `parent_child` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `run` varchar(32) NOT NULL,
  `parent` varchar(128) DEFAULT NULL,
  `child` varchar(128) NOT NULL,
  `wt` int(11) NOT NULL,
  `pmu` int(11) NOT NULL,
  `mu` int(11) NOT NULL,
  `cpu` int(11) NOT NULL,
  `ct` int(11) NOT NULL,
  `rec_on` datetime NOT NULL,
  PRIMARY KEY (`id`)
)""";
SELECT_DETAILS = "select id, get, post, cookie, perfdata, `timestamp` from details"
INSERT_INTO_PC = """insert into parent_child (run, parent, child, ct, wt, cpu, mu, pmu, rec_on) values
        (
        %(run_id)s,
        %(parent)s,
        %(child)s,
        %(callnum)s,
        %(walltime)s,
        %(proc)s,
        %(mem)s,
        %(peakmem)s,
        %(rec_on)s
        )""";
 | 
	bsd-3-clause | -5,667,717,924,082,559,000 | 26.093023 | 102 | 0.582833 | false | 
| 
	laurent-george/weboob | 
	modules/barclays/pages.py | 
	6 | 
	10301 | 
	# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from decimal import Decimal
import re
from weboob.deprecated.browser import Page
from weboob.capabilities.bank import Account
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class LoginPage(Page):
    def login(self, login, passwd):
        self.browser.select_form(name='frmLogin')
        self.browser['username'] = login.encode(self.browser.ENCODING)
        self.browser['password'] = passwd.encode(self.browser.ENCODING)
        self.browser.submit(nologin=True)
    def has_redirect(self):
        if len(self.document.getroot().xpath('//form')) > 0:
            return False
        else:
            return True
class Login2Page(Page):
    def login(self, secret):
        label = self.document.xpath('//span[@class="PF_LABEL"]')[0].text.strip()
        letters = ''
        for n in re.findall('(\d+)', label):
            letters += secret[int(n) - 1]
        self.browser.select_form(name='frmControl')
        self.browser['word'] = letters
        self.browser.submit(name='valider', nologin=True)
class IndexPage(Page):
    pass
class AccountsPage(Page):
    ACCOUNT_TYPES = {u'Epargne':                Account.TYPE_SAVINGS,
                     u'Liquidités':             Account.TYPE_CHECKING,
                     u'Titres':                 Account.TYPE_MARKET,
                     u'Prêts':                  Account.TYPE_LOAN,
                    }
    def get_list(self):
        accounts = []
        for block in self.document.xpath('//div[@class="pave"]/div'):
            head_type = block.xpath('./div/span[@class="accGroupLabel"]')[0].text.strip()
            account_type = self.ACCOUNT_TYPES.get(head_type, Account.TYPE_UNKNOWN)
            for tr in block.cssselect('ul li.tbord_account'):
                id = tr.attrib.get('id', '')
                if id.find('contratId') != 0:
                    self.logger.warning('Unable to parse contract ID: %r' % id)
                    continue
                id = id[id.find('contratId')+len('contratId'):]
                link = tr.cssselect('span.accountLabel a')[0]
                balance = Decimal(FrenchTransaction.clean_amount(tr.cssselect('span.accountTotal')[0].text))
                if id.endswith('CRT'):
                    account = accounts[-1]
                    account._card_links.append(link.attrib['href'])
                    if not account.coming:
                        account.coming = Decimal('0.0')
                    account.coming += balance
                    continue
                account = Account()
                account.id = id
                account.label = unicode(link.text.strip())
                account.type = account_type
                account.balance = balance
                account.currency = account.get_currency(tr.cssselect('span.accountDev')[0].text)
                account._link = link.attrib['href']
                account._card_links = []
                accounts.append(account)
        if len(accounts) == 0:
            # Sometimes, accounts are only in javascript...
            for script in self.document.xpath('//script'):
                text = script.text
                if text is None:
                    continue
                if 'remotePerso' not in text:
                    continue
                account = None
                attribs = {}
                account_type = Account.TYPE_UNKNOWN
                for line in text.split('\n'):
                    line = line.strip()
                    m = re.match("data.libelle = '(.*)';", line)
                    if m:
                        account_type = self.ACCOUNT_TYPES.get(m.group(1), Account.TYPE_UNKNOWN)
                    elif line == 'var remotePerso = new Object;':
                        account = Account()
                    elif account is not None:
                        m = re.match("remotePerso.(\w+) = '?(.*?)'?;", line)
                        if m:
                            attribs[m.group(1)] = m.group(2)
                        elif line.startswith('listProduitsGroup'):
                            account.id = attribs['refContrat']
                            account.label = attribs['libelle']
                            account.type = account_type
                            account.balance = Decimal(FrenchTransaction.clean_amount(attribs['soldeDateOpeValeurFormatted']))
                            account.currency = account.get_currency(attribs['codeDevise'])
                            account._link = 'tbord.do?id=%s' % attribs['id']
                            account._card_links = []
                            if account.id.endswith('CRT'):
                                a = accounts[-1]
                                a._card_links.append(account._link)
                                if not a.coming:
                                    a.coming = Decimal('0.0')
                                a.coming += account.balance
                            else:
                                accounts.append(account)
                            account = None
        return accounts
class Transaction(FrenchTransaction):
    PATTERNS = [(re.compile('^RET DAB (?P<text>.*?) RETRAIT DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}).*'),
                                                              FrenchTransaction.TYPE_WITHDRAWAL),
                (re.compile('^RET DAB (?P<text>.*?) CARTE ?:.*'),
                                                              FrenchTransaction.TYPE_WITHDRAWAL),
                (re.compile('^RET DAB (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2}) (?P<text>.*?) CARTE .*'),
                                                              FrenchTransaction.TYPE_WITHDRAWAL),
                (re.compile('^(?P<text>.*) RETRAIT DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) .*'),
                                                              FrenchTransaction.TYPE_WITHDRAWAL),
                (re.compile('(\w+) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) CB[:\*][^ ]+ (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_CARD),
                (re.compile('^(?P<category>VIR(EMEN)?T? (SEPA)?(RECU|FAVEUR)?)( /FRM)?(?P<text>.*)'),
                                                              FrenchTransaction.TYPE_TRANSFER),
                (re.compile('^PRLV (?P<text>.*) (REF \w+)?$'),FrenchTransaction.TYPE_ORDER),
                (re.compile('^CHEQUE.*? (REF \w+)?$'),        FrenchTransaction.TYPE_CHECK),
                (re.compile('^(AGIOS /|FRAIS) (?P<text>.*)'), FrenchTransaction.TYPE_BANK),
                (re.compile('^(CONVENTION \d+ )?COTIS(ATION)? (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_BANK),
                (re.compile('^REMISE (?P<text>.*)'),          FrenchTransaction.TYPE_DEPOSIT),
                (re.compile('^(?P<text>.*)( \d+)? QUITTANCE .*'),
                                                              FrenchTransaction.TYPE_ORDER),
                (re.compile('^.* LE (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2})$'),
                                                              FrenchTransaction.TYPE_UNKNOWN),
               ]
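# Hedged illustration (not part of the original module): each PATTERNS entry is
# a plain (compiled regex, transaction type) pair, so a raw statement label can
# be checked directly with the re module. The sample label below is made up.
#
#     import re
#     m = re.match(r'^PRLV (?P<text>.*) (REF \w+)?$',
#                  'PRLV EDF CLIENT 12345 REF ABC123')
#     m.group('text')   # -> 'EDF CLIENT 12345'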
class HistoryBasePage(Page):
    def get_history(self):
        self.logger.warning('Do not support account of type %s' % type(self).__name__)
        return iter([])
class TransactionsPage(HistoryBasePage):
    def get_history(self):
        for tr in self.document.xpath('//table[@id="operation"]/tbody/tr'):
            tds = tr.findall('td')
            if len(tds) < 5:
                continue
            t = Transaction(tds[-1].findall('img')[-1].attrib.get('id', ''))
            date = u''.join([txt.strip() for txt in tds[0].itertext()])
            raw = u' '.join([txt.strip() for txt in tds[1].itertext()])
            debit = u''.join([txt.strip() for txt in tds[-3].itertext()])
            credit = u''.join([txt.strip() for txt in tds[-2].itertext()])
            t.parse(date, re.sub(r'[ ]+', ' ', raw))
            t.set_amount(credit, debit)
            t._coming = False
            if t.raw.startswith('ACHAT CARTE -DEBIT DIFFERE'):
                continue
            yield t
class CardPage(HistoryBasePage):
    def get_history(self):
        debit_date = None
        coming = True
        for tr in self.document.xpath('//table[@class="report"]/tbody/tr'):
            tds = tr.findall('td')
            if len(tds) == 2:
                # headers
                m = re.match('.* (\d+)/(\d+)/(\d+)', tds[0].text.strip())
                debit_date = datetime.date(int(m.group(3)), int(m.group(2)), int(m.group(1)))
                if debit_date < datetime.date.today():
                    coming = False
            if len(tds) != 3:
                continue
            t = Transaction(0)
            date = u''.join([txt.strip() for txt in tds[0].itertext()])
            raw = u' '.join([txt.strip() for txt in tds[1].itertext()])
            amount = u''.join([txt.strip() for txt in tds[-1].itertext()])
            t.parse(date, re.sub(r'[ ]+', ' ', raw))
            if debit_date is not None:
                t.date = debit_date
            t.label = unicode(tds[1].find('span').text.strip())
            t.type = t.TYPE_CARD
            t._coming = coming
            t.set_amount(amount)
            yield t
class ValuationPage(HistoryBasePage):
    pass
class LoanPage(HistoryBasePage):
    pass
class MarketPage(HistoryBasePage):
    pass
class AssurancePage(HistoryBasePage):
    pass
 | 
	agpl-3.0 | -2,684,563,615,446,834,700 | 40.865854 | 125 | 0.491407 | false | 
| 
	nash-x/hws | 
	neutron/service.py | 
	3 | 
	10505 | 
	# Copyright 2011 VMware, Inc
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import inspect
import logging as std_logging
import os
import random
from oslo.config import cfg
from oslo.messaging import server as rpc_server
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as session
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service as common_service
from neutron import wsgi
service_opts = [
    cfg.IntOpt('periodic_interval',
               default=40,
               help=_('Seconds between running periodic tasks')),
    cfg.IntOpt('api_workers',
               default=0,
               help=_('Number of separate API worker processes for service')),
    cfg.IntOpt('rpc_workers',
               default=0,
               help=_('Number of RPC worker processes for service')),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=5,
               help=_('Range of seconds to randomly delay when starting the '
                      'periodic task scheduler to reduce stampeding. '
                      '(Disable by setting to 0)')),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class WsgiService(object):
    """Base class for WSGI based services.
    For each api you define, you must also define these flags:
    :<api>_listen: The address on which to listen
    :<api>_listen_port: The port on which to listen
    """
    def __init__(self, app_name):
        self.app_name = app_name
        self.wsgi_app = None
    def start(self):
        self.wsgi_app = _run_wsgi(self.app_name)
    def wait(self):
        self.wsgi_app.wait()
class NeutronApiService(WsgiService):
    """Class for neutron-api service."""
    @classmethod
    def create(cls, app_name='neutron'):
        # Setup logging early, supplying both the CLI options and the
        # configuration mapping from the config file
        # We only update the conf dict for the verbose and debug
        # flags. Everything else must be set up in the conf file...
        # Log the options used when starting if we're in debug mode...
        config.setup_logging()
        # Dump the initial option values
        cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
        service = cls(app_name)
        return service
def serve_wsgi(cls):
    try:
        service = cls.create()
        service.start()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Unrecoverable error: please check log '
                            'for details.'))
    return service
class RpcWorker(object):
    """Wraps a worker to be handled by ProcessLauncher"""
    def __init__(self, plugin):
        self._plugin = plugin
        self._servers = []
    def start(self):
        # We may have just forked from parent process.  A quick disposal of the
        # existing sql connections avoids producing errors later when they are
        # discovered to be broken.
        session.get_engine().pool.dispose()
        self._servers = self._plugin.start_rpc_listeners()
    def wait(self):
        for server in self._servers:
            if isinstance(server, rpc_server.MessageHandlingServer):
                server.wait()
    def stop(self):
        for server in self._servers:
            if isinstance(server, rpc_server.MessageHandlingServer):
                server.kill()
            self._servers = []
def serve_rpc():
    plugin = manager.NeutronManager.get_plugin()
    # If 0 < rpc_workers then start_rpc_listeners would be called in a
    # subprocess and we cannot simply catch the NotImplementedError.  It is
    # simpler to check this up front by testing whether the plugin supports
    # multiple RPC workers.
    if not plugin.rpc_workers_supported():
        LOG.debug(_("Active plugin doesn't implement start_rpc_listeners"))
        if 0 < cfg.CONF.rpc_workers:
            msg = _("'rpc_workers = %d' ignored because start_rpc_listeners "
                    "is not implemented.")
            LOG.error(msg, cfg.CONF.rpc_workers)
        raise NotImplementedError()
    try:
        rpc = RpcWorker(plugin)
        if cfg.CONF.rpc_workers < 1:
            rpc.start()
            return rpc
        else:
            launcher = common_service.ProcessLauncher(wait_interval=1.0)
            launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
            return launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Unrecoverable error: please check log '
                            'for details.'))
def _run_wsgi(app_name):
    app = config.load_paste_app(app_name)
    if not app:
        LOG.error(_('No known API applications configured.'))
        return
    server = wsgi.Server("Neutron")
    server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host,
                 workers=cfg.CONF.api_workers)
    # Dump all option values here after all options are parsed
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
    LOG.info(_("Neutron service started, listening on %(host)s:%(port)s"),
             {'host': cfg.CONF.bind_host,
              'port': cfg.CONF.bind_port})
    return server
class Service(n_rpc.Service):
    """Service object for binaries running on hosts.
    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager.
    """
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 *args, **kwargs):
        self.binary = binary
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []
        super(Service, self).__init__(host, topic, manager=self.manager)
    def start(self):
        self.manager.init_host()
        super(Service, self).start()
        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)
        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None
            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
        self.manager.after_start()
    def __getattr__(self, key):
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)
    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None):
        """Instantiates class and passes back application object.
        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'nova-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_interval: defaults to CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        """
        if not host:
            host = CONF.host
        if not binary:
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary.rpartition('neutron-')[2]
            topic = topic.replace("-", "_")
        if not manager:
            manager = CONF.get('%s_manager' % topic, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_interval is None:
            periodic_interval = CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay)
        return service_obj
    def kill(self):
        """Destroy the service object."""
        self.stop()
    def stop(self):
        super(Service, self).stop()
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                LOG.exception(_("Exception occurs when timer stops"))
                pass
        self.timers = []
    def wait(self):
        super(Service, self).wait()
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                LOG.exception(_("Exception occurs when waiting for timer"))
                pass
    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
    def report_state(self):
        """Update the state of this service."""
        # Todo(gongysh) report state to neutron server
        pass
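# Hedged usage sketch (not part of the original module): how a server binary
# typically wires the entry points above together. Option and config parsing
# is assumed to have been done beforehand; only names from this file are used.
#
#     from neutron import service
#
#     api = service.serve_wsgi(service.NeutronApiService)  # REST API workers
#     rpc = service.serve_rpc()                            # plugin RPC workers
#     api.wait()
#     rpc.wait()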
 | 
	apache-2.0 | -2,348,898,602,650,476,500 | 34.133779 | 79 | 0.620086 | false | 
| 
	mclaughlin6464/pasta | 
	pasta/ising.py | 
	1 | 
	5474 | 
	'''
This is a dummy file for me to get started making an Ising model. I'll get this 2-D Ising running, then generalize.
'''
import argparse
from itertools import izip
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
def run_ising(N, d, K, J,h, n_steps, plot = False):
    '''
    :param N:
        Length of one side of the lattice, so there are N**d sites in total.
    :param d:
        Number of dimensions of the lattice.
    :param K:
        Bond coupling strength used in the Ising potential.
    :param J:
        Energy of bond strength; the acceptance test uses exp(K/J*(E_0 - E_f)).
    :param h:
        Magnetic field strength.
    :param n_steps:
        Number of Metropolis steps to simulate.
    :param plot:
        Whether to plot the lattice while running (only allowed for d <= 2).
    :return:
        Array of the average spin correlation at distances 1 to N/2.
    '''
    if plot:
        try:
            assert d <= 2
        except AssertionError:
            raise AssertionError("Can only plot in one or two dimensions.")
    #TODO wrap these better
    assert N >0 and N < 1000
    assert d > 0
    assert n_steps > 0
    np.random.seed(0)
    size = tuple(N for i in xrange(d))
    lattice = np.ones(size)
    #make a random initial state
    lattice-= np.random.randint(0,2, size =size)*2
    # do different initialization
    E_0 = energy(lattice, potential, K, h)
    if plot:
        plt.ion()
    for step in xrange(n_steps):
        if step%1000 == 0:
            print step
        site = tuple(np.random.randint(0, N, size=d))
        # consider flipping this site
        lattice[site] *= -1
        E_f = energy(lattice, potential, K, h)
        # if E_F < E_0, keep
        # if E_F > E_0, keep randomly given change of energies
        if E_f >= E_0:
            keep = np.random.uniform() < np.exp(K / J * (E_0 - E_f))
        else:
            keep = True
        if keep:
            E_0 = E_f
        else:
            lattice[site] *= -1
        # fig = plt.figure()
        if plot and step % 100 == 0:
            if d == 1:
                plt.imshow(lattice.reshape((1, -1)),interpolation='none')
            else:
                plt.imshow(lattice, interpolation='none')
            plt.title(correlation(lattice, N/2))
            plt.pause(0.01)
            plt.clf()
    return np.array([correlation(lattice, r) for r in xrange(1, N/2+1)])
def get_NN(site, N, d, r= 1):
    '''
    The NN of the site. Will only return those UP in index (east, south, and down) to avoid double counting.
    Accounts for PBC
    :param site:
        (d,) array of coordinates in the lattice
    :param N:
        Size of one side of the lattice
    :param d:
        dimension of the lattice
    :return:
        dxd numpy array where each row corresponds to the nearest neighbors.
    '''
    mult_sites = np.r_[ [site for i in xrange(d)]]
    adjustment = np.eye(d)*r
    return ((mult_sites+adjustment)%N).astype(int)
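# Hedged worked example (not part of the original module): on a 4x4 lattice the
# "up in index" neighbours wrap around through the modulo above, implementing
# the periodic boundary conditions.
#
#     get_NN((0, 0), N=4, d=2)  ->  [[1, 0], [0, 1]]
#     get_NN((3, 3), N=4, d=2)  ->  [[0, 3], [3, 0]]   # wraps back to row/col 0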
def potential(s1, s2, K, h):
    '''
    Basic Ising potential
    :param s1:
        First spin (-1 or 1)
    :param s2:
        Second spin
    :param K:
        Coupling constant
    :return:
        Energy of this particular bond
    '''
    return -1*K*s1*s2 - h/2*(s1+s2)#should this be abstracted to call the NN function?
def energy(lattice, potential, K, h = 0):
    '''
    Calculate the energy of a lattice
    :param lattice:
        Lattice to calculate the energy on
    :param potential:
        Function defining the potential of a given site.
    :return:
        Energy of the lattice
    '''
    N = lattice.shape[0]
    d = len(lattice.shape)
    dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing = 'ij')
    all_sites = izip(*[slice.flatten() for slice in dim_slices])
    E = 0
    for site in all_sites:
        nn = get_NN(site, N, d)
        for neighbor in nn:
            E+=potential(lattice[site], lattice[tuple(neighbor)],K = K, h = h)
    return E
def magnetization(lattice):
    return lattice.mean()
def correlation(lattice, r):
    '''
    The average spin correlation at distance r.
    :param lattice:
        The lattice to calculate the statistic on.
    :param r:
        Distance to measure correlation
    :return:
    '''
    N = lattice.shape[0]
    d = len(lattice.shape)
    dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing='ij')
    all_sites = izip(*[slice.flatten() for slice in dim_slices])
    xi = 0
    for site in all_sites:
        nn = get_NN(site, N, d, r)
        for neighbor in nn:
            xi += lattice[site]*lattice[tuple(neighbor)]
    return xi/((N**d)*d)
if __name__  == '__main__':
    parser = argparse.ArgumentParser(description='Simulate an ising model')
    parser.add_argument('N', type = int, help = 'Length of one side of the cube.')
    parser.add_argument('d', type = int, help = 'Number of dimensions of the cube.')
    #parser.add_argument('K', type = float, help ='Bond coupling strength.')
    parser.add_argument('J', type = float, default = 1.0, nargs = '?',\
                        help = 'Energy of bond strength. Optional, default is 1.')
    parser.add_argument('h', type = float, default=0.0, nargs = '?',\
                        help = 'Magnetic field strength. Optional, default is 0.')
    parser.add_argument('n_steps', type = int, default = 1000, nargs = '?',\
                        help = 'Number of steps to simulate. Default is 1000.')
    parser.add_argument('--plot', action = 'store_true',\
                        help = 'Whether or not to plot results. Only allowed with d = 1 or 2.')
    args = parser.parse_args()
    spins = []
    Ks = [0.5, 0.6, 0.65, 0.7, 0.8, 0.9]
    for K in Ks:
        print K
        spins.append(run_ising(K = K, **vars(args)))
    for K, spin in izip(Ks, spins):
        plt.plot(spin, label = K )
    plt.legend(loc = 'best')
    plt.ylim([-0.1, 1.1])
    plt.show() | 
	mit | -154,069,744,090,616,030 | 28.278075 | 115 | 0.568871 | false | 
| 
	benoitsteiner/tensorflow-opencl | 
	tensorflow/python/client/session_test.py | 
	11 | 
	71617 | 
	# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import threading
import time
import numpy as np
import six
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops  # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
ops._USE_C_API = True
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
  def testUseExistingGraph(self):
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
    with session.Session(graph=g):
      result = c.eval()
      self.assertAllEqual(result, [[42.0]])
  def testUseDefaultGraph(self):
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
      with session.Session():
        result = c.eval()
        self.assertAllEqual(result, [[42.0]])
  def testCreate(self):
    with session.Session():
      inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
      copy = array_ops.identity(inp)
      # Test with feed.
      # TODO(mrry): Investigate why order='F' didn't work.
      arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
      copy_val = copy.eval({'W1:0': arr})
      self.assertAllEqual(arr, copy_val)
      # Test without feed.
      copy_val = copy.eval()
      self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
                                     dtype=np.float32), copy_val)
  def testManyCPUs(self):
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testPerSessionThreads(self):
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(use_per_session_threads=True)):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testSessionInterOpThreadPool(self):
    config = config_pb2.ConfigProto()
    pool = config.session_inter_op_thread_pool.add()
    with session.Session(config=config) as s:
      inp = constant_op.constant(10.0, name='W1')
      results = s.run([inp])
      self.assertAllEqual([10.0], results)
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    with session.Session(config=config) as s:
      inp = constant_op.constant(20.0, name='W2')
      results = s.run([inp])
      self.assertAllEqual([20.0], results)
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    pool.global_name = 't1'
    run_options = config_pb2.RunOptions()
    run_options.inter_op_thread_pool = (
        len(config.session_inter_op_thread_pool) - 1)
    with session.Session(config=config) as s:
      inp = constant_op.constant(30.0, name='W2')
      results = s.run([inp], options=run_options)
      self.assertAllEqual([30.0], results)
  def testErrorsReported(self):
    with session.Session() as s:
      constant_op.constant(10.0, name='W1')
      with self.assertRaises(ValueError):
        s.run('foo:0')
  def testErrorPayload(self):
    with session.Session():
      a = array_ops.placeholder(dtypes.float32)
      with self.assertRaisesOpError(lambda e: e.op == a.op):
        a.eval()
  def testErrorCodeWithNoNodeDef(self):
    with session.Session() as s:
      a = array_ops.placeholder(dtypes.float32, shape=[])
      b = array_ops.placeholder(dtypes.float32, shape=[])
      r1 = math_ops.add(a, b)
      def exc_predicate(e):
        return (e.op is None and e.node_def is None and
                e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        # Run with a bogus handle.
        s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
  @test_util.disable_c_api  # No shape registration for 'ConstructionFails'
  def testOpConstructionErrorPayload(self):
    with session.Session():
      failing_op = ops.get_default_graph().create_op(
          'ConstructionFails', [], [], name='f')
      def exc_predicate(e):
        return (e.op == failing_op
                and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        failing_op.run()
  def testErrorBasedOn(self):
    with session.Session() as sess:
      a = constant_op.constant(0.0, shape=[2, 3])
      # NOTE(mrry): The original_op is nonsense, but used here to test that the
      #   errors are reported correctly.
      # pylint: disable=protected-access
      with sess.graph._original_op(a.op):
        b = array_ops.identity(a, name='id')
      with sess.graph._original_op(b.op):
        c = array_ops.placeholder(dtypes.float32)
      # pylint: enable=protected-access
      def exc_predicate(e):
        return (e.op == c.op
                and e.op._original_op == b.op
                and e.op._original_op._original_op == a.op)
      with self.assertRaisesOpError(exc_predicate):
        c.eval()
  def testFetchNone(self):
    with session.Session() as s:
      a = constant_op.constant(1.0)
      with self.assertRaises(TypeError):
        s.run(None)
      with self.assertRaises(TypeError):
        s.run([None])
      with self.assertRaises(TypeError):
        s.run({'b': None})
      with self.assertRaises(TypeError):
        s.run({'a': a, 'b': None})
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testFetchSingleton(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      res = sess.run(a)
      self.assertEqual(42.0, res)
      res = sess.run(a.op)  # An op, not a tensor.
      self.assertEqual(None, res)
      tensor_runner = sess.make_callable(a)
      res = tensor_runner()
      self.assertEqual(42.0, res)
      op_runner = sess.make_callable(a.op)
      res = op_runner()
      self.assertEqual(None, res)
  def testFetchSingletonByName(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      res = sess.run(a.name)
      self.assertEqual(42.0, res)
      res = sess.run(a.op)  # An op, not a tensor.
      self.assertEqual(None, res)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testFetchList(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      v = variables.Variable([54.0])
      assign = v.assign([63.0])
      res = sess.run([a, b, c, a.name, assign.op])
      self.assertTrue(isinstance(res, list))
      self.assertEqual([42.0, None, 44.0, 42.0, None], res)
      list_runner = sess.make_callable([a, b, c, a.name, assign.op])
      res = list_runner()
      self.assertTrue(isinstance(res, list))
      self.assertEqual([42.0, None, 44.0, 42.0, None], res)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testFetchTuple(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run((a, b, c, a.name))
      self.assertTrue(isinstance(res, tuple))
      self.assertEqual((42.0, None, 44.0, 42.0), res)
      tuple_runner = sess.make_callable((a, b, c, a.name))
      res = tuple_runner()
      self.assertTrue(isinstance(res, tuple))
      self.assertEqual((42.0, None, 44.0, 42.0), res)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testFetchNamedTuple(self):
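    """Checks that fetching a namedtuple returns results in the same namedtuple type."""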
    # pylint: disable=invalid-name
    ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
    # pylint: enable=invalid-name
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run(ABC(a, b, c))
      self.assertTrue(isinstance(res, ABC))
      self.assertEqual(42.0, res.a)
      self.assertEqual(None, res.b)
      self.assertEqual(44.0, res.c)
      namedtuple_runner = sess.make_callable(ABC(a, b, c))
      res = namedtuple_runner()
      self.assertTrue(isinstance(res, ABC))
      self.assertEqual(42.0, res.a)
      self.assertEqual(None, res.b)
      self.assertEqual(44.0, res.c)
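  # A minimal sketch of how a structure-preserving fetch can be used by client
  # code ('loss_t' and 'acc_t' are hypothetical tensors, not defined here):
  #   Metrics = collections.namedtuple('Metrics', ['loss', 'acc'])
  #   out = sess.run(Metrics(loss=loss_t, acc=acc_t))
  #   total = out.loss + out.acc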
  def testFetchDict(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run({'a': a, 'b': b, 'c': c})
      self.assertTrue(isinstance(res, dict))
      self.assertEqual(42.0, res['a'])
      self.assertEqual(None, res['b'])
      self.assertEqual(44.0, res['c'])
  def testFetchOrderedDict(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
      self.assertTrue(isinstance(res, collections.OrderedDict))
      self.assertEqual([3, 2, 1], list(res.keys()))
      self.assertEqual(42.0, res[3])
      self.assertEqual(None, res[2])
      self.assertEqual(44.0, res[1])
  def testFetchNestingEmptyOneLevel(self):
    with session.Session() as sess:
      a_val = 11.0
      a = constant_op.constant(a_val)
      res = sess.run([[], tuple(), {}])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(3, len(res))

      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(0, len(res[0]))
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(0, len(res[1]))
      self.assertTrue(isinstance(res[2], dict))
      self.assertEqual(0, len(res[2]))
      res = sess.run([[], tuple(), {}, a])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(0, len(res[0]))
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(0, len(res[1]))
      self.assertTrue(isinstance(res[2], dict))
      self.assertEqual(0, len(res[2]))
      self.assertEqual(a_val, res[3])
  def testFetchNestingOneLevel(self):
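    """Checks that fetch structure (lists, tuples, namedtuples, dicts) is preserved one level deep."""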
    with session.Session() as sess:
      # pylint: disable=invalid-name
      ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
      DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
      # pylint: enable=invalid-name
      a_val = 42.0
      b_val = None
      c_val = 44.0
      a = constant_op.constant(a_val)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(c_val)
      # List of lists, tuples, namedtuple, and dict
      res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
                      {'a': a.name, 'c': c, 'b': b}])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(3, len(res[0]))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(3, len(res[1]))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(3, len(res[3]))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Tuple of lists, tuples, namedtuple, and dict
      res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
                      {'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, tuple))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(3, len(res[0]))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(3, len(res[1]))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(3, len(res[3]))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Namedtuple of lists, tuples, namedtuples, and dict
      res = sess.run(DEFG(d=[a, b, c],
                          e=(a, b, c),
                          f=ABC(a=a.name, b=b, c=c),
                          g={'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, DEFG))
      self.assertTrue(isinstance(res.d, list))
      self.assertEqual(3, len(res.d))
      self.assertEqual(a_val, res.d[0])
      self.assertEqual(b_val, res.d[1])
      self.assertEqual(c_val, res.d[2])
      self.assertTrue(isinstance(res.e, tuple))
      self.assertEqual(3, len(res.e))
      self.assertEqual(a_val, res.e[0])
      self.assertEqual(b_val, res.e[1])
      self.assertEqual(c_val, res.e[2])
      self.assertTrue(isinstance(res.f, ABC))
      self.assertEqual(a_val, res.f.a)
      self.assertEqual(b_val, res.f.b)
      self.assertEqual(c_val, res.f.c)
      self.assertTrue(isinstance(res.g, dict))
      self.assertEqual(3, len(res.g))
      self.assertEqual(a_val, res.g['a'])
      self.assertEqual(b_val, res.g['b'])
      self.assertEqual(c_val, res.g['c'])
      # Dict of lists, tuples, namedtuples, and dict
      res = sess.run({'d': [a, b, c],
                      'e': (a, b, c),
                      'f': ABC(a=a, b=b, c=c),
                      'g': {'a': a.name, 'c': c, 'b': b}})
      self.assertTrue(isinstance(res, dict))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res['d'], list))
      self.assertEqual(3, len(res['d']))
      self.assertEqual(a_val, res['d'][0])
      self.assertEqual(b_val, res['d'][1])
      self.assertEqual(c_val, res['d'][2])
      self.assertTrue(isinstance(res['e'], tuple))
      self.assertEqual(3, len(res['e']))
      self.assertEqual(a_val, res['e'][0])
      self.assertEqual(b_val, res['e'][1])
      self.assertEqual(c_val, res['e'][2])
      self.assertTrue(isinstance(res['f'], ABC))
      self.assertEqual(a_val, res['f'].a)
      self.assertEqual(b_val, res['f'].b)
      self.assertEqual(c_val, res['f'].c)
      self.assertTrue(isinstance(res['g'], dict))
      self.assertEqual(3, len(res['g']))
      self.assertEqual(a_val, res['g']['a'])
      self.assertEqual(b_val, res['g']['b'])
      self.assertEqual(c_val, res['g']['c'])
  def testFetchTensorObject(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      results_with_list = s.run([c])
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
      results_with_single = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
      results_with_get = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
      a_val, b_val = s.run([a, b])  # Test multiple fetches.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
      results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
      self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_dict['b'])
      self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
      self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
      # Test nested structures
      results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
      self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_nested_list[0][0][1])
      self.assertAllEqual(results_with_nested_list[0][0][0],
                          results_with_nested_list[1])
      self.assertAllEqual(results_with_nested_list[1],
                          results_with_nested_list[2][0])
      self.assertAllEqual(results_with_nested_list[0][0][1],
                          results_with_nested_list[0][1])
      self.assertAllEqual(results_with_nested_list[0][1],
                          results_with_nested_list[2][1])
  def testFetchScalar(self):
    with session.Session() as s:
      for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
        x = scalar(7)
        y = scalar(8)
        tf_x = constant_op.constant(x, shape=[])
        tf_y = constant_op.constant(y)
        tf_xy = math_ops.add(tf_x, tf_y)
        # Single fetch
        xy = s.run(tf_xy)
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # List fetch
        xy, = s.run([tf_xy])
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Dict fetch
        xy = s.run({'xy': tf_xy})['xy']
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Nested list fetch
        xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
        self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
        self.assertEqual(scalar, type(xy[0][0][0]))
        self.assertEqual(scalar, type(xy[1]))
        self.assertEqual(scalar, type(xy[2][0]))
  def testFetchOperationObject(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      v = variables.Variable(a, name='testFetchOperationObject_v')
      s.run(v.initializer)
      v_val = s.run(v)
      self.assertAllEqual([[1.0, 1.0]], v_val)
  def testFetchSparseTensor(self):
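    """Checks that fetched SparseTensors unpack as tuples or SparseTensorValues across fetch structures."""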
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = sparse_tensor.SparseTensor(
          constant_op.constant(indices),
          constant_op.constant(values),
          constant_op.constant(shape))
      # Single fetch, use as tuple
      sp_out = s.run(sp)
      indices_out, values_out, shape_out = sp_out
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Single fetch, use as SparseTensorValue
      sp_out = s.run(sp)
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Tuple fetch, use as tuple
      indices_out, values_out, shape_out = s.run(sp)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as tuple
      (indices_out, values_out, shape_out), = s.run([sp])
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as SparseTensorValue
      sp_out, = s.run([sp])
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Dict fetch (single value), use as tuple
      indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch (list value), use as tuple
      (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch, use as SparseTensorValue
      sp_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Nested list fetch use as tuple
      sp_out = s.run([[[sp]], sp])
      indices_out, values_out, shape_out = sp_out[0][0][0]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      indices_out, values_out, shape_out = sp_out[1]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Nested list fetch, use as SparseTensorValue
      sp_out = s.run([[[sp]], sp])
      self.assertAllEqual(sp_out[0][0][0].indices, indices)
      self.assertAllEqual(sp_out[0][0][0].values, values)
      self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
      self.assertAllEqual(sp_out[1].indices, indices)
      self.assertAllEqual(sp_out[1].values, values)
      self.assertAllEqual(sp_out[1].dense_shape, shape)
  def testFeedSparseTensor(self):
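    """Checks that SparseTensors can be fed as tuples or SparseTensorValues and fetched back."""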
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = sparse_tensor.SparseTensor(
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(3,)),)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with tuple, fetch sp directly
      sp_out = s.run(sp, {sp: (indices, values, shape)})
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(
          sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
      # Feed SparseTensorValue and fetch sp directly.
      sp_out = s.run(
          sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
  def testFeedSparsePlaceholder(self):
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(
          sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderPartialShape(self):
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(
          shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(
          sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderConstantShape(self):
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32,
                                        shape=shape,
                                        name='placeholder1')
      self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
      self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
  def testFetchIndexedSlices(self):
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices),
          constant_op.constant(dense_shape))
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlices(self):
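    """Checks that IndexedSlices can be fed as tuples or IndexedSlicesValues."""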
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(3,)),)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind_dense_shape = array_ops.identity(ind.dense_shape)
      ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
      # Feed with tuple
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: (values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testFetchIndexedSlicesWithoutDenseShape(self):
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = None
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices), None)
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = ops.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple
      values_out, indices_out = s.run(
          [ind_values, ind_indices], {ind: (values, indices)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run(
          [ind_values, ind_indices],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testExtendWithStatelessOperations(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      # Extend will happen here.
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
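    """Checks that stateful ops added after a run are handled by graph extension."""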
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  def testExtendWithGroupBy(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
  def testTensorGetMethod(self):
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
      self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  def testOperationRunMethod(self):
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variables.Variable(a, dtype=a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      assign_a_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)
      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()
      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def testDefaultGraphWithThreads(self):
    # Fork ten threads that use their thread-local default graph.
    threads = []
    constructed_events = [threading.Event() for _ in range(10)]
    continue_event = threading.Event()
    for i, constructed_event in enumerate(constructed_events):
      t = self.checkedThread(target=self._testDefaultGraphInThread,
                             args=(constructed_event, continue_event, i))
      threads.append(t)
    for t in threads:
      t.start()
    for constructed_event in constructed_events:
      constructed_event.wait()
    continue_event.set()
    for t in threads:
      t.join()
  def testParallelRun(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      ev = threading.Event()
      def run_step():
        ev.wait()
        val = c.eval(session=sess)
        self.assertEqual(val, 5.0)
      threads = [self.checkedThread(target=run_step) for _ in range(100)]
      for t in threads:
        t.start()
      ev.set()
      for t in threads:
        t.join()
  def testRunFeedDict(self):
    with session.Session() as s:
      x = array_ops.zeros([2])
      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()
      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
  def testGraphDef(self):
    with session.Session() as sess:
      self.assertProtoEquals(
          'versions { producer: %d min_consumer: %d }' % (
              versions.GRAPH_DEF_VERSION,
              versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
          sess.graph_def)
      c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
      d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
      self.assertAllEqual(c.eval(), 5.0)
      self.assertAllEqual(d.eval(), 6.0)
      e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
      self.assertAllEqual(e.eval(), 7.0)
  def testUseAfterClose(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
      sess.run(c)
  def testUseAfterCloseConcurrent(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
      def update_thread():
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)
      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
  def testUseEmptyGraph(self):
    with session.Session() as sess:
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run([])
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run(())
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run({})
  def testNotEntered(self):
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      self.assertEqual(sess.run(c_2), 5.0)
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()
  def testInteractive(self):
    with ops.device('/cpu:0'):
      sess = session.InteractiveSession()
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      self.assertAllEqual([[24.0]], e.eval())
      sess.close()
  def testInteractivePlacePrunedGraph(self):
    sess = session.InteractiveSession()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this op on GPU is invalid.  If at some point
    # GPU kernels are added to this test, a different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    # Only run the valid op, this should work.
    b.eval()
    with self.assertRaises(errors.InvalidArgumentError):
      a.eval()
    sess.close()
  def testDefaultSessionPlacePrunedGraph(self):
    sess = session.Session()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this op on GPU is invalid.  If at some point
    # GPU kernels are added to this test, a different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)
    sess.close()
  def testSharedGraph(self):
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
    with session.Session(graph=g) as sess1:
      with session.Session(graph=g) as sess2:
        self.assertAllEqual(sess1.run(c), sess2.run(c))
  def testDuplicatedInputs(self):
    with session.Session() as sess:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 3])
      a_val, b_val, a2_val = sess.run([a, b, a])
      self.assertAllEqual(a_val, [[1.0, 1.0]])
      self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
      self.assertAllEqual(a2_val, [[1.0, 1.0]])
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testFeedAndFetch(self):
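    """Checks feed/fetch round trips across dtypes and shapes, via run() and make_callable()."""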
    with session.Session() as sess:
      for dtype in [dtypes.float16,
                    dtypes.float32,
                    dtypes.float64,
                    dtypes.int32,
                    dtypes.uint8,
                    dtypes.int16,
                    dtypes.int8,
                    dtypes.int64,
                    dtypes.bool,
                    dtypes.complex64,
                    dtypes.complex128]:
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
          np_dtype = dtype.as_numpy_dtype
          feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
          out_t = array_ops.identity(feed_t)
          np_array = np.random.randint(-10, 10, shape)
          if dtype == dtypes.bool:
            np_array = np_array > 0
          elif dtype == dtypes.complex64:
            np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
          else:
            np_array = np_array.astype(np_dtype)
          self.assertAllEqual(np_array,
                              sess.run(out_t, feed_dict={feed_t: np_array}))
          # Check that we can also get the feed back.
          self.assertAllEqual(np_array,
                              sess.run(feed_t, feed_dict={feed_t: np_array}))
          # Also check that we can get both back.
          out_v, feed_v = sess.run([out_t, feed_t],
                                   feed_dict={feed_t: np_array})
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)
          feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
          out_v, feed_v = feed_fetch_runner(np_array)
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testMakeCallableOnTensorWithRunOptions(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      tensor_runner = sess.make_callable(a, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      res = tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(42.0, res)
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testMakeCallableOnOperationWithRunOptions(self):
    with session.Session() as sess:
      a = variables.Variable(42.0)
      b = state_ops.assign_add(a, 1.0)
      sess.run(a.initializer)
      tensor_runner = sess.make_callable(b.op, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(43.0, sess.run(a))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  @test_util.disable_c_api  # session.make_callable() doesn't work with C API
  def testMakeCallableWithFeedListAndRunOptions(self):
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      tensor_runner = sess.make_callable(
          a, feed_list=[ph.name], accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      self.assertAllClose(
          42.0,
          tensor_runner(41.0, options=run_options, run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testFeedError(self):
    with session.Session() as sess:
      feed_t = array_ops.placeholder(dtype=dtypes.float32)
      out_t = array_ops.identity(feed_t)
      feed_val = constant_op.constant(5.0)
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        sess.run(out_t, feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.eval(feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.op.run(feed_dict={feed_t: feed_val})
  def testFeedPrecisionLossError(self):
    with session.Session() as sess:
      largest_int64 = np.iinfo(np.int64).max
      feed_int_implicit_int32 = constant_op.constant(1)
      feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
      out_t = constant_op.constant(1.0)
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
  def testStringFetch(self):
    with session.Session():
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                          dtype=np.object).reshape(shape) if size > 0 else []
        c = constant_op.constant(c_list)
        self.assertAllEqual(c.eval(), c_list)
  def testStringFeed(self):
    with session.Session() as sess:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                          dtype=np.object).reshape(shape)
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
        c = array_ops.identity(feed_t)
        self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
        self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
                            c_list)
        c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
        self.assertAllEqual(c_v, c_list)
        self.assertAllEqual(feed_v, c_list)
  def testStringFeedWithNullCharacters(self):
    with session.Session():
      c_list = [b'\n\x01\x00', b'\n\x00\x01']
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
      c = array_ops.identity(feed_t)
      out = c.eval(feed_dict={feed_t: c_list})
      self.assertEqual(c_list[0], out[0])
      self.assertEqual(c_list[1], out[1])
  def testStringFeedWithUnicode(self):
    with session.Session():
      c_list = [u'\n\x01\x00', u'\n\x00\x01',
                u'\u26a3 unicode', u'\U0001f60e deal with it']
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
      c = array_ops.identity(feed_t)
      out = c.eval(feed_dict={feed_t: c_list})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
      out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
  def testInvalidTargetFails(self):
    with self.assertRaisesRegexp(
        errors.NotFoundError,
        'No session factory registered for the given session options'):
      session.Session('INVALID_TARGET')
  def testFetchByNameDifferentStringTypes(self):
    with session.Session() as sess:
      c = constant_op.constant(42.0, name='c')
      d = constant_op.constant(43.0, name=u'd')
      e = constant_op.constant(44.0, name=b'e')
      f = constant_op.constant(45.0, name=r'f')
      self.assertTrue(isinstance(c.name, six.text_type))
      self.assertTrue(isinstance(d.name, six.text_type))
      self.assertTrue(isinstance(e.name, six.text_type))
      self.assertTrue(isinstance(f.name, six.text_type))
      self.assertEqual(42.0, sess.run('c:0'))
      self.assertEqual(42.0, sess.run(u'c:0'))
      self.assertEqual(42.0, sess.run(b'c:0'))
      self.assertEqual(42.0, sess.run(r'c:0'))
      self.assertEqual(43.0, sess.run('d:0'))
      self.assertEqual(43.0, sess.run(u'd:0'))
      self.assertEqual(43.0, sess.run(b'd:0'))
      self.assertEqual(43.0, sess.run(r'd:0'))
      self.assertEqual(44.0, sess.run('e:0'))
      self.assertEqual(44.0, sess.run(u'e:0'))
      self.assertEqual(44.0, sess.run(b'e:0'))
      self.assertEqual(44.0, sess.run(r'e:0'))
      self.assertEqual(45.0, sess.run('f:0'))
      self.assertEqual(45.0, sess.run(u'f:0'))
      self.assertEqual(45.0, sess.run(b'f:0'))
      self.assertEqual(45.0, sess.run(r'f:0'))
  def testIncorrectGraph(self):
    with ops.Graph().as_default() as g_1:
      c_1 = constant_op.constant(1.0, name='c')
    with ops.Graph().as_default() as g_2:
      c_2 = constant_op.constant(2.0, name='c')
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)
    with session.Session(graph=g_1) as sess_1:
      self.assertEqual(1.0, sess_1.run(c_1))
      with self.assertRaises(ValueError):
        sess_1.run(c_2)
      with self.assertRaises(ValueError):
        sess_1.run(c_2.op)
    with session.Session(graph=g_2) as sess_2:
      with self.assertRaises(ValueError):
        sess_2.run(c_1)
      with self.assertRaises(ValueError):
        sess_2.run(c_1.op)
      self.assertEqual(2.0, sess_2.run(c_2))
  def testFeedDictKeyException(self):
    with session.Session() as sess:
      a = constant_op.constant(1.0, dtypes.float32, name='a')
      with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
        sess.run(a, feed_dict={'a': [2.0]})
  def testPerStepTrace(self):
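    """Checks that step_stats appear in RunMetadata only when tracing options are passed."""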
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    with ops.device('/cpu:0'):
      with session.Session() as sess:
        sess.run(constant_op.constant(1.0))
        self.assertTrue(not run_metadata.HasField('step_stats'))
        sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertTrue(not run_metadata.HasField('step_stats'))
        sess.run(constant_op.constant(1.0),
                 options=run_options,
                 run_metadata=run_metadata)
        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
  def testRunOptionsRunMetadata(self):
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    with ops.device('/cpu:0'):
      with session.Session() as sess:
        # all combinations are valid
        sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
        sess.run(constant_op.constant(1.0), options=None,
                 run_metadata=run_metadata)
        self.assertTrue(not run_metadata.HasField('step_stats'))
        sess.run(constant_op.constant(1.0), options=run_options,
                 run_metadata=None)
        self.assertTrue(not run_metadata.HasField('step_stats'))
        sess.run(constant_op.constant(1.0), options=run_options,
                 run_metadata=run_metadata)
        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
  def testFeedShapeCompatibility(self):
    with session.Session() as sess:
      some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
      new_shape = constant_op.constant([2, 2])
      reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
      with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
        sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
      with self.assertRaisesRegexp(ValueError, 'may not be fed'):
        sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
  def testInferShapesFalse(self):
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session()
      self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)
  def testInferShapesTrue(self):
    config = config_pb2.ConfigProto(
        graph_options=config_pb2.GraphOptions(infer_shapes=True))
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session(config=config)
      self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)
  def testBuildCostModel(self):
    run_options = config_pb2.RunOptions()
    config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config) as sess:
      with ops.device('/device:GPU:0'):
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = math_ops.add(a, a)
        c = array_ops.identity(b)
        d = math_ops.multiply(c, c)
      for step in xrange(120):
        run_metadata = config_pb2.RunMetadata()
        sess.run(d, feed_dict={a: 1.0},
                 options=run_options, run_metadata=run_metadata)
        if step == 99:
          self.assertTrue(run_metadata.HasField('cost_graph'))
        else:
          self.assertFalse(run_metadata.HasField('cost_graph'))
  def runTestOutputPartitionGraphs(self, sess):
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    a = constant_op.constant(1)
    run_metadata = config_pb2.RunMetadata()
    sess.run(a, options=run_options, run_metadata=run_metadata)
    self.assertGreater(len(run_metadata.partition_graphs), 0)
    sess.run(a, run_metadata=run_metadata)
    self.assertEqual(len(run_metadata.partition_graphs), 0)
  def testOutputPartitionGraphsDirect(self):
    self.runTestOutputPartitionGraphs(session.Session())
  def testOutputPartitionGraphsDistributed(self):
    server = server_lib.Server.create_local_server()
    self.runTestOutputPartitionGraphs(session.Session(server.target))
  def testNonInteractiveSessionNesting(self):
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()
    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()
    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
      sess1_controller.__exit__(None, None, None)
    ops._default_session_stack.reset()
  def testInteractiveSessionNesting(self):
    sess1 = session.InteractiveSession()
    sess2 = session.InteractiveSession()
    del sess1
    del sess2
  def testAsDefault(self):
    c = constant_op.constant(37)
    sess = session.Session()
    with sess.as_default():
      self.assertEqual(37, c.eval())
    # Ensure that the session remains valid even when it is not captured.
    with session.Session().as_default():
      self.assertEqual(37, c.eval())
  def testReentry(self):
    sess = session.Session()
    with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
      with sess:
        with sess:
          pass
  def testInvalidArgument(self):
    with self.assertRaisesRegexp(TypeError, 'target must be a string'):
      session.Session(37)
    with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
      session.Session(config=37)
    with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
      session.Session(graph=37)
  def testTimeoutWithShortOperations(self):
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(
        capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2)
  @test_util.disable_c_api  # set_device does not work with C API
  def testRegisterFetchAndFeedConversionFunctions(self):
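    """Checks registration of custom fetch/feed conversion functions and that duplicate registration fails."""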
    class SquaredTensor(object):
      def __init__(self, tensor):
        self.sq = math_ops.square(tensor)
    fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
    feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
    feed_fn2 = lambda feed: [feed.sq]
    session.register_session_run_conversion_functions(
        SquaredTensor, fetch_fn, feed_fn1, feed_fn2)
    with self.assertRaises(ValueError):
      session.register_session_run_conversion_functions(
          SquaredTensor, fetch_fn, feed_fn1, feed_fn2)
    with self.test_session() as sess:
      np1 = np.array([1.0, 1.5, 2.0, 2.5])
      np2 = np.array([3.0, 3.5, 4.0, 4.5])
      squared_tensor = SquaredTensor(np2)
      squared_eval = sess.run(squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
      squared_eval = sess.run(
          squared_tensor, feed_dict={squared_tensor: np1 * np1})
      self.assertAllClose(np1 * np1, squared_eval)
      partial_run = sess.partial_run_setup([squared_tensor], [])
      squared_eval = sess.partial_run(partial_run, squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
  def testDefaultLogDevicePlacement(self):
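    """Checks that log_device_placement set on the server config produces placement logs."""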
    class CaptureStderr(str):
      """Class to capture stderr from C++ shared library."""
      def __enter__(self):
        self._esc = compat.as_str('\b')
        self._output = compat.as_str('')
        self._stderr = sys.stderr
        self._fd = self._stderr.fileno()
        self._out_pipe, in_pipe = os.pipe()
        # Save the original io stream.
        self._dup_fd = os.dup(self._fd)
        # Replace the original io stream with in pipe.
        os.dup2(in_pipe, self._fd)
        return self
      def __exit__(self, *args):
        self._stderr.write(self._esc)
        self._stderr.flush()
        self.read()
        os.close(self._out_pipe)
        # Restore the original io stream.
        os.dup2(self._dup_fd, self._fd)
      def read(self):
        while True:
          data = os.read(self._out_pipe, 1)
          if not data or compat.as_str(data) == self._esc:
            break
          self._output += compat.as_str(data)
      def __str__(self):
        return self._output
    # Passing the config to the server, but not the session should still result
    # in logging device placement.
    config = config_pb2.ConfigProto(log_device_placement=True)
    server = server_lib.Server.create_local_server(config=config)
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    c = a + b
    with session.Session(server.target) as sess:
      with CaptureStderr() as log:
        sess.run(c)
      # Ensure that we did log device placement.
      self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in str(log),
                      str(log))
  def testLocalMasterSessionTimeout(self):
    # Test that the timeout passed in a config to the session works correctly.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server()
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()
    with session.Session(server.target, config=config) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)
  def testDefaultServerTimeout(self):
    # Test that the default server config timeout gets used when no Session
    # config is provided.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server(config=config)
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()
    with session.Session(server.target) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)
  def runTestBuildGraphError(self, sess):
    # Ensure that errors from building the graph get propagated.
    data = array_ops.placeholder(dtypes.float32, shape=[])
    enter_1 = control_flow_ops.enter(data, 'foo_1', False)
    enter_2 = control_flow_ops.enter(data, 'foo_2', False)
    res = math_ops.add(enter_1, enter_2)
    with self.assertRaisesOpError('has inputs from different frames'):
      sess.run(res, feed_dict={data: 1.0})
  def testBuildGraphErrorDirect(self):
    self.runTestBuildGraphError(session.Session())
  def testBuildGraphErrorDist(self):
    server = server_lib.Server.create_local_server()
    self.runTestBuildGraphError(session.Session(server.target))
  def testDeviceAttributes(self):
    attrs = session._DeviceAttributes(
        '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
  def testDeviceAttributesCanonicalization(self):
    attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
                                      'TYPE', 1337)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
  def runTestAddFunctionToSession(self, target=''):
    """Add a function to a session after the graph has already been run."""
    @function.Defun(dtypes.float32)
    def foo(x):
      return x + 1
    x = constant_op.constant(1.0)
    with session.Session(target=target) as sess:
      sess.run(x)
      f = foo(x)
      result = sess.run(f)
      self.assertEqual(result, 2.0)
  @test_util.disable_c_api  # functions don't work with C API
  def testAddFunctionToSession(self):
    self.runTestAddFunctionToSession()
  @test_util.disable_c_api  # functions don't work with C API
  def testAddFunctionToGrpcSession(self):
    server = server_lib.Server.create_local_server()
    self.runTestAddFunctionToSession(server.target)
if __name__ == '__main__':
  googletest.main()
 | 
	apache-2.0 | 2,883,305,308,870,016,000 | 39.994276 | 88 | 0.627868 | false | 
| 
	mhnatiuk/phd_sociology_of_religion | 
	scrapper/lib/python2.7/site-packages/scrapy/contrib/linkextractors/htmlparser.py | 
	11 | 
	2468 | 
	"""
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from urlparse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique
    def _extract_links(self, response_text, response_url, response_encoding):
        self.reset()
        self.feed(response_text)
        self.close()
        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
        ret = []
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):
                link.url = link.url.encode(response_encoding)
            link.url = urljoin(base_url, link.url)
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret
    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)
    def reset(self):
        HTMLParser.reset(self)
        self.base_url = None
        self.current_link = None
        self.links = []
    def handle_starttag(self, tag, attrs):
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link
    def handle_endtag(self, tag):
        if self.scan_tag(tag):
            self.current_link = None
    def handle_data(self, data):
        if self.current_link:
            self.current_link.text = self.current_link.text + data
    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
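# A minimal usage sketch of the extractor above, relying only on the
# _extract_links() signature defined in this class; the markup, URL and
# encoding below are arbitrary example values.
def _example_extract_from_string():
    html = '<html><body><a href="/page?id=1">first</a></body></html>'
    extractor = HtmlParserLinkExtractor(unique=True)
    # Returns Link objects whose URLs have been made absolute and safely encoded.
    return extractor._extract_links(html, 'http://example.com/index.html', 'utf-8')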
 | 
	gpl-2.0 | -148,817,184,968,540,600 | 31.906667 | 97 | 0.60778 | false | 
| 
	Cynary/distro6.01 | 
	arch/6.01Soft/lib601-F13-4/build/lib/soar/io/io_useif.py | 
	2 | 
	3114 | 
	#
# soar
# io.py - object-oriented interface to the robot
#
# This io file makes use of the "official" soar interface 
# (sonarDistances, etc), and it is still ugly, since it relies on having
# a handle on the brain environment, but it is arguably neater than
# the io.py file.  However it seems to introduce some kind of lag that 
# makes the really complicated labs with localization stuff work poorly
import soar.util
from soar.util import *
robotRadius = 0.2
def configure_io(namespace):
    # need to use global 'cause we don't want to accidentally overwrite
    # the brain environ by setting it to None when io.py is imported
    global io_environ
    io_environ = namespace
class SensorInput():
    """
    Represents one set of sensor readings from the robot, including
    sonars, odometry, and readings from the analogInputs
    """
    global io_environ
    def __init__(self, cheat=False):
        self.sonars = io_environ['sonarDistances']()
        if cheat:
            p = io_environ['cheatPose']()
        else:
            p = io_environ['pose']()
        self.odometry = valueListToPose(p)
        self.analogInputs = io_environ['analogInputs']()
    def __str__(self):
        return 'Sonar: ' + util.prettyString(self.sonars) + \
               "; Odo: " + util.prettyString(self.odometry) +\
               "; Analog: " + util.prettyString(self.analogInputs)
referenceVoltage = 5.0
class Action:
    """
    One set of commands to send to the robot
    """
    def __init__(self, fvel = 0.0, rvel = 0.0, 
                 voltage = referenceVoltage,
                 discreteStepLength = None):
        """
        @param fvel: signed number indicating forward velocity in m/s
        @param rvel: signed number indicating rotational velocity in
        rad/sec (?)  positive is left, negative is right
        @param voltage: voltage to send to analog input port of
        control board;  should be between 0 and 10v ??
        @param discreteStepLength: if C{None}, then the robot
        continues driving at the last commanded velocity until a new
        action command is received;  if set to a positive value, the
        robot will drive at the last commanded velocity until
        C{discreteStepLength} seconds have passed, and then stop.
        Setting the step length to, e.g., 0.1, is useful when the
        brain is doing so much computation that the robot drives too
        far between steps.
        """
        self.fvel = fvel
        self.rvel = rvel
        self.voltage = voltage
        self.discreteStepLength = discreteStepLength
    def execute(self):
        if self.discreteStepLength:
            io_environ['discreteMotorOutput'](self.fvel, self.rvel,
                                              self.discreteStepLength)
        else:
            io_environ['motorOutput'](self.fvel, self.rvel)
        io_environ['analogOutput'](self.voltage)
    def __str__(self):
        return 'Act: ' + \
               util.prettyString([self.fvel, self.rvel, self.voltage])
def registerUserFunction(type, f):
    io_environ['registerUserFunction'](type, f)
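# A minimal sketch of a brain step built from the classes above, assuming
# configure_io() has already been called with the brain environment; the
# 0.5 m threshold, 0.1 m/s speed and sonar index are arbitrary example values.
def _example_step():
    inp = SensorInput()
    front = inp.sonars[len(inp.sonars) / 2]  # a representative reading
    if front > 0.5:
        act = Action(fvel=0.1, rvel=0.0)
    else:
        act = Action(fvel=0.0, rvel=0.0)
    act.execute()
    return act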
 | 
	mit | -9,170,203,133,476,238,000 | 36.518072 | 72 | 0.635196 | false | 
| 
	wanghaoran1988/origin | 
	cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py | 
	100 | 
	7192 | 
	#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from charms import layer
from charms.reactive import hook
from charms.reactive import is_state
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
@hook('upgrade-charm')
def reset_delivery_states():
    ''' Remove the state set when resources are unpacked. '''
    remove_state('kubernetes-e2e.installed')
@when('kubernetes-e2e.installed')
def messaging():
    ''' Probe our relations to determine the proper messaging to the
    end user '''
    missing_services = []
    if not is_state('kubernetes-master.available'):
        missing_services.append('kubernetes-master')
    if not is_state('certificates.available'):
        missing_services.append('certificates')
    if missing_services:
        if len(missing_services) > 1:
            subject = 'relations'
        else:
            subject = 'relation'
        services = ','.join(missing_services)
        message = 'Missing {0}: {1}'.format(subject, services)
        hookenv.status_set('blocked', message)
        return
    hookenv.status_set('active', 'Ready to test.')
@when_not('kubernetes-e2e.installed')
def install_kubernetes_e2e():
    ''' Deliver the e2e and kubectl components from the binary resource stream
    packages declared in the charm '''
    charm_dir = os.getenv('CHARM_DIR')
    arch = determine_arch()
    # Get the resource via resource_get
    resource = 'e2e_{}'.format(arch)
    try:
        archive = hookenv.resource_get(resource)
    except Exception:
        message = 'Error fetching the {} resource.'.format(resource)
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing {} resource.'.format(resource))
        hookenv.status_set('blocked', 'Missing {} resource.'.format(resource))
        return
    # Handle null resource publication; treat any file smaller than 1 MB as incomplete.
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked',
                           'Incomplete {} resource.'.format(resource))
        return
    hookenv.status_set('maintenance',
                       'Unpacking {} resource.'.format(resource))
    unpack_path = '{}/files/kubernetes'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    services = ['e2e.test', 'ginkgo', 'kubectl']
    for service in services:
        unpacked = '{}/{}'.format(unpack_path, service)
        app_path = '/usr/local/bin/{}'.format(service)
        install = ['install', '-v', unpacked, app_path]
        call(install)
    set_state('kubernetes-e2e.installed')
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'kubernetes-master.available',
      'kubernetes-e2e.installed')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
    ''' Prepare the data to feed to create the kubeconfig file. '''
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    key = layer_options.get('client_key_path')
    cert = layer_options.get('client_certificate_path')
    servers = get_kube_api_servers(master)
    # pedantry
    kubeconfig_path = '/home/ubuntu/.kube/config'
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/root/.kube/config', servers[0], ca, key, cert,
                      user='root')
    create_kubeconfig(kubeconfig_path, servers[0], ca, key, cert,
                      user='ubuntu')
    # Set permissions on the ubuntu users kubeconfig to ensure a consistent UX
    cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
    check_call(cmd)
    set_state('kubeconfig.ready')
@when('kubernetes-e2e.installed', 'kubeconfig.ready')
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubectl', 'version', '--client']
    from subprocess import CalledProcessError
    try:
        version = check_output(cmd).decode('utf-8')
    except CalledProcessError:
        message = "Missing kubeconfig causes errors. Skipping version set."
        hookenv.log(message)
        return
    git_version = version.split('GitVersion:"v')[-1]
    version_from = git_version.split('",')[0]
    hookenv.application_version_set(version_from.rstrip())
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
                      context='juju-context', cluster='juju-cluster'):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
          '--client-key={2} --client-certificate={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, user, key, certificate)))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
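# A minimal sketch of calling create_kubeconfig() directly, assuming kubectl
# has already been installed by the charm; every path and address below is a
# hypothetical example value.
def _example_create_kubeconfig():
    create_kubeconfig('/home/ubuntu/.kube/config',
                      'https://10.0.0.1:6443',
                      '/srv/kubernetes/ca.crt',
                      '/srv/kubernetes/client.key',
                      '/srv/kubernetes/client.crt',
                      user='ubuntu')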
def get_kube_api_servers(master):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    hosts = []
    # Iterate over every service from the relation object.
    for service in master.services():
        for unit in service['hosts']:
            hosts.append('https://{0}:{1}'.format(unit['hostname'],
                                                  unit['port']))
    return hosts
def determine_arch():
    ''' dpkg wrapper to surface the architecture we are tied to'''
    cmd = ['dpkg', '--print-architecture']
    output = check_output(cmd).decode('utf-8')
    return output.rstrip()
 | 
	apache-2.0 | -6,698,703,982,956,785,000 | 34.60396 | 78 | 0.663515 | false | 
| 
	darryncampbell/KerrieWorking-Angular | 
	app/bower_components/bootstrap/test-infra/s3_cache.py | 
	1700 | 
	3523 | 
	#!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
    hasher = sha256()
    with open(filename, 'rb') as input_file:
        hasher.update(input_file.read())
    file_hash = hasher.hexdigest()
    print('sha256({}) = {}'.format(filename, file_hash))
    return file_hash
def _delete_file_quietly(filename):
    try:
        _delete_file(filename)
    except (OSError, IOError):
        pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
    return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    print("Creating tarball of {}...".format(directory))
    run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
    print("Extracting tarball of {}...".format(directory))
    run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    #   AWS_ACCESS_KEY_ID -- AWS Access Key ID
    #   AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    mode, friendly_name, dependencies_file, directory = argv
    conn = S3Connection()
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")
    dependencies_file_hash = _sha256_of_file(dependencies_file)
    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'
    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        if isfile(NEED_TO_UPLOAD_MARKER):  # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
 | 
	mit | 2,759,179,791,188,848,600 | 31.925234 | 114 | 0.675844 | false | 
| 
	njvack/ge-mri-rtafni | 
	scanner-console/vendor/dicom/contrib/imViewer_Simple.py | 
	4 | 
	12358 | 
	#==========================================================================
# imViewer-Simple.py
#
#    An example program that opens uncompressed DICOM images and
# converts them via numPy and PIL to be viewed in wxWidgets GUI
# apps.  The conversion is currently:
#
#    pydicom->NumPy->PIL->wxPython.Image->wxPython.Bitmap
#
# Gruesome but it mostly works.  Surely there is at least one
# of these steps that could be eliminated (probably PIL) but
# haven't tried that yet and I may want some of the PIL manipulation
# functions.
#
#    This won't handle RLE, embedded JPEG-Lossy, JPEG-lossless,
# JPEG2000, old ACR/NEMA files, or anything weird.  Also doesn't
# handle some RGB images that I tried.
#
#    Have added Adit Panchal's LUT code.  It helps a lot, but needs
# to be further generalized.  Added test for window and/or level
# as 'list' type - crude, but it worked for a bunch of old MR and
# CT slices I have.
#
# Testing:      minimal
#               Tried only on WinXP sp2 using numpy 1.3.0
#               and PIL 1.1.7b1, Python 2.6.4, and wxPython 2.8.10.1
#
# Dave Witten:  Nov. 11, 2009
#==========================================================================
import os
import os.path
import sys
import dicom
import wx
have_PIL = True
try:
    import PIL.Image
except ImportError:
    have_PIL = False
have_numpy = True
try:
    import numpy as np
except ImportError:
    have_numpy = False
#----------------------------------------------------------------
#  Initialize image capabilities.
#----------------------------------------------------------------
wx.InitAllImageHandlers()
def MsgDlg(window, string, caption='OFAImage', style=wx.YES_NO | wx.CANCEL):
    """Common MessageDialog."""
    dlg = wx.MessageDialog(window, string, caption, style)
    result = dlg.ShowModal()
    dlg.Destroy()
    return result
class ImFrame(wx.Frame):
    """Class for main window."""
    def __init__(self, parent, title):
        """Create the pydicom image example's main frame window."""
        wx.Frame.__init__(self, parent, id=-1, title="", pos=wx.DefaultPosition,
                          size=wx.Size(w=1024, h=768),
                          style=wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER | wx.CLIP_CHILDREN)
        #--------------------------------------------------------
        # Set up the menubar.
        #--------------------------------------------------------
        self.mainmenu = wx.MenuBar()
        # Make the 'File' menu.
        menu = wx.Menu()
        item = menu.Append(wx.ID_ANY, '&Open', 'Open file for editing')
        self.Bind(wx.EVT_MENU, self.OnFileOpen, item)
        item = menu.Append(wx.ID_ANY, 'E&xit', 'Exit Program')
        self.Bind(wx.EVT_MENU, self.OnFileExit, item)
        self.mainmenu.Append(menu, '&File')
        # Attach the menu bar to the window.
        self.SetMenuBar(self.mainmenu)
        #--------------------------------------------------------
        # Set up the main splitter window.
        #--------------------------------------------------------
        self.mainSplitter = wx.SplitterWindow(self, style=wx.NO_3D | wx.SP_3D)
        self.mainSplitter.SetMinimumPaneSize(1)
        #-------------------------------------------------------------
        # Create the folderTreeView on the left.
        #-------------------------------------------------------------
        self.dsTreeView = wx.TreeCtrl(self.mainSplitter, style=wx.TR_LINES_AT_ROOT | wx.TR_HAS_BUTTONS)
        #--------------------------------------------------------
        # Create the ImageView on the right pane.
        #--------------------------------------------------------
        self.imView = wx.Panel(self.mainSplitter, style=wx.VSCROLL | wx.HSCROLL | wx.CLIP_CHILDREN)
        self.imView.Bind(wx.EVT_PAINT, self.OnPaint)
        self.imView.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.imView.Bind(wx.EVT_SIZE, self.OnSize)
        #--------------------------------------------------------
        # Install the splitter panes.
        #--------------------------------------------------------
        self.mainSplitter.SplitVertically(self.dsTreeView, self.imView)
        self.mainSplitter.SetSashPosition(300, True)
        #--------------------------------------------------------
        # Initialize some values
        #--------------------------------------------------------
        self.dcmdsRoot = False
        self.foldersRoot = False
        self.loadCentered = True
        self.bitmap = None
        self.Show(True)
    def OnFileExit(self, event):
        """Exits the program."""
        self.Destroy()
        event.Skip()
    def OnSize(self, event):
        "Window 'size' event."
        self.Refresh()
    def OnEraseBackground(self, event):
        "Window 'erase background' event."
        pass
    def populateTree(self, ds):
        """ Populate the tree in the left window with the [desired]
        dataset values"""
        if not self.dcmdsRoot:
            self.dcmdsRoot = self.dsTreeView.AddRoot(text="DICOM Objects")
        else:
            self.dsTreeView.DeleteChildren(self.dcmdsRoot)
        self.recurse_tree(ds, self.dcmdsRoot)
        self.dsTreeView.ExpandAll()
    def recurse_tree(self, ds, parent, hide=False):
        """ order the dicom tags """
        for data_element in ds:
            if isinstance(data_element.value, unicode):
                ip = self.dsTreeView.AppendItem(parent, text=unicode(data_element))
            else:
                ip = self.dsTreeView.AppendItem(parent, text=str(data_element))
            if data_element.VR == "SQ":
                for i, ds in enumerate(data_element.value):
                    sq_item_description = data_element.name.replace(" Sequence", "")
                    item_text = "%s %d" % (sq_item_description, i + 1)
                    parentNodeID = self.dsTreeView.AppendItem(ip, text=item_text.rjust(128))
                    self.recurse_tree(ds, parentNodeID)
## --- Most of what is important happens below this line ---------------------
    def OnFileOpen(self, event):
        """Opens a selected file."""
        dlg = wx.FileDialog(self, 'Choose a file to add.', '', '', '*.*', wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            fullPath = dlg.GetPath()
            imageFile = dlg.GetFilename()
            #checkDICMHeader()
            self.show_file(imageFile, fullPath)
    def OnPaint(self, event):
        "Window 'paint' event."
        dc = wx.PaintDC(self.imView)
        dc = wx.BufferedDC(dc)
        # paint a background just so it isn't *so* boring.
        dc.SetBackground(wx.Brush("WHITE"))
        dc.Clear()
        dc.SetBrush(wx.Brush("GREY", wx.CROSSDIAG_HATCH))
        windowsize = self.imView.GetSizeTuple()
        dc.DrawRectangle(0, 0, windowsize[0], windowsize[1])
        bmpX0 = 0
        bmpY0 = 0
        if self.bitmap is not None:
            if self.loadCentered:
                bmpX0 = (windowsize[0] - self.bitmap.Width) / 2
                bmpY0 = (windowsize[1] - self.bitmap.Height) / 2
            dc.DrawBitmap(self.bitmap, bmpX0, bmpY0, False)
    #------------------------------------------------------------
    #  ImFrame.ConvertWXToPIL()
    #  Expropriated from Andrea Gavana's
    #  ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertWXToPIL(self, bmp):
        """ Convert wx.Image Into PIL Image. """
        width = bmp.GetWidth()
        height = bmp.GetHeight()
        im = wx.EmptyImage(width, height)
        im.fromarray("RGBA", (width, height), bmp.GetData())
        return im
    #------------------------------------------------------------
    #  ImFrame.ConvertPILToWX()
    #  Expropriated from Andrea Gavana's
    #  ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertPILToWX(self, pil, alpha=True):
        """ Convert PIL Image Into wx.Image. """
        if alpha:
            image = apply(wx.EmptyImage, pil.size)
            image.SetData(pil.convert("RGB").tostring())
            image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
        else:
            image = wx.EmptyImage(pil.size[0], pil.size[1])
            new_image = pil.convert('RGB')
            data = new_image.tostring()
            image.SetData(data)
        return image
    def get_LUT_value(self, data, window, level):
        """Apply the RGB Look-Up Table for the given data and window/level value."""
        if not have_numpy:
            raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
        if isinstance(window, list):
            window = window[0]
        if isinstance(level, list):
            level = level[0]
        return np.piecewise(data,
                            [data <= (level - 0.5 - (window - 1) / 2),
                             data > (level - 0.5 + (window - 1) / 2)],
                            [0, 255, lambda data: ((data - (level - 0.5)) / (window - 1) + 0.5) * (255 - 0)]
                           )
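    # A worked example of the mapping above, using an illustrative
    # window=400, level=40 (arbitrary example values):
    #   data <= 40 - 0.5 - 399/2 = -160.0  ->  0
    #   data >  40 - 0.5 + 399/2 =  239.5  ->  255
    #   data == 40 (the level)             ->  ((40 - 39.5)/399 + 0.5) * 255 ~= 128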
    #-----------------------------------------------------------
    # ImFrame.loadPIL_LUT(dataset)
    # Display an image using the Python Imaging Library (PIL)
    #-----------------------------------------------------------
    def loadPIL_LUT(self, dataset):
        if not have_PIL:
            raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
        if('PixelData' not in dataset):
            raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
        if('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):  # can only apply LUT if these values exist
            bits = dataset.BitsAllocated
            samples = dataset.SamplesPerPixel
            if bits == 8 and samples == 1:
                mode = "L"
            elif bits == 8 and samples == 3:
                mode = "RGB"
            elif bits == 16:  # not sure about this -- PIL source says is 'experimental' and no documentation.
                mode = "I;16"  # Also, should bytes swap depending on endian of file and system??
            else:
                raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
            size = (dataset.Columns, dataset.Rows)
            im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)  # Recommended to specify all details by http://www.pythonware.com/library/pil/handbook/image.htm
        else:
            image = self.get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
            im = PIL.Image.fromarray(image).convert('L')  # Convert mode to L since LUT has only 256 values: http://www.pythonware.com/library/pil/handbook/image.htm
        return im
    def show_file(self, imageFile, fullPath):
        """ Load the DICOM file, make sure it contains at least one
        image, and set it up for display by OnPaint().  ** be
        careful not to pass a unicode string to read_file or it will
        give you 'fp object does not have a defer_size attribute',
        or some such."""
        ds = dicom.read_file(str(fullPath))
        ds.decode()                                         # change strings to unicode
        self.populateTree(ds)
        if 'PixelData' in ds:
            self.dImage = self.loadPIL_LUT(ds)
            if self.dImage is not None:
                tmpImage = self.ConvertPILToWX(self.dImage, False)
                self.bitmap = wx.BitmapFromImage(tmpImage)
                self.Refresh()
##------ This is just the initialization of the App  -------------------------
#=======================================================
# The main App Class.
#=======================================================
class App(wx.App):
    """Image Application."""
    def OnInit(self):
        """Create the Image Application."""
        frame = ImFrame(None, 'wxImage Example')
        return True
#---------------------------------------------------------------------
# If this file is running as main or a standalone test, begin execution here.
#---------------------------------------------------------------------
if __name__ == '__main__':
    app = App(0)
    app.MainLoop()
 | 
	mit | -4,177,575,431,541,067,000 | 40.891525 | 185 | 0.515941 | false | 
| 
	vadimtk/chrome4sdp | 
	tools/telemetry/third_party/gsutilz/third_party/pyasn1/test/type/test_constraint.py | 
	53 | 
	8746 | 
	from pyasn1.type import constraint, error
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
   version_info[0:2] in ( (3, 0), (3, 1) ):
    try:
        import unittest2 as unittest
    except ImportError:
        import unittest
else:
    import unittest
class SingleValueConstraintTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.SingleValueConstraint(1,2)
        self.c2 = constraint.SingleValueConstraint(3,4)
    def testCmp(self): assert self.c1 == self.c1, 'comparison fails'
    def testHash(self): assert hash(self.c1) != hash(self.c2), 'hash() fails'
    def testGoodVal(self):
        try:
            self.c1(1)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(4)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ContainedSubtypeConstraintTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ContainedSubtypeConstraint(
            constraint.SingleValueConstraint(12)
            )
    def testGoodVal(self):
        try:
            self.c1(12)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(4)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ValueRangeConstraintTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ValueRangeConstraint(1,4)
    def testGoodVal(self):
        try:
            self.c1(1)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(-5)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ValueSizeConstraintTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ValueSizeConstraint(1,2)
    def testGoodVal(self):
        try:
            self.c1('a')
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1('abc')
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
    def setUp(self):
        self.c1 = constraint.PermittedAlphabetConstraint('A', 'B', 'C')
        self.c2 = constraint.PermittedAlphabetConstraint('DEF')
    def testGoodVal(self):
        try:
            self.c1('A')
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1('E')
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ConstraintsIntersectionTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ConstraintsIntersection(
            constraint.SingleValueConstraint(4),
            constraint.ValueRangeConstraint(2, 4)
            )
    def testCmp1(self):
        assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
    def testCmp2(self):
        assert constraint.SingleValueConstraint(5) not in self.c1, \
               '__cmp__() fails'
    def testCmp3(self):
        c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
            constraint.SingleValueConstraint(4),
            constraint.ValueRangeConstraint(2, 4)
            ))
        assert self.c1 in c, '__cmp__() fails'
    def testCmp4(self):
        c = constraint.ConstraintsUnion(
            constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
            )
        assert self.c1 not in c, '__cmp__() fails'
        
    def testGoodVal(self):
        try:
            self.c1(4)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(-5)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class InnerTypeConstraintTestCase(unittest.TestCase):
    def testConst1(self):
        c = constraint.InnerTypeConstraint(
            constraint.SingleValueConstraint(4)
            )
        try:
            c(4, 32)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
        try:
            c(5, 32)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
    def testConst2(self):
        c = constraint.InnerTypeConstraint(
            (0, constraint.SingleValueConstraint(4), 'PRESENT'),
            (1, constraint.SingleValueConstraint(4), 'ABSENT')
            )
        try:
            c(4, 0)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
        try:
            c(4, 1)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'            
        try:
            c(3, 0)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'            
# Constraints compositions
class ConstraintsIntersectionRangeTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ConstraintsIntersection(
            constraint.ValueRangeConstraint(1, 9),
            constraint.ValueRangeConstraint(2, 5)
            )
    def testGoodVal(self):
        try:
            self.c1(3)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(0)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ConstraintsUnionTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ConstraintsUnion(
            constraint.SingleValueConstraint(5),
            constraint.ValueRangeConstraint(1, 3)
            )
    def testGoodVal(self):
        try:
            self.c1(2)
            self.c1(5)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(-5)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
class ConstraintsExclusionTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ConstraintsExclusion(
            constraint.ValueRangeConstraint(2, 4)
            )
    def testGoodVal(self):
        try:
            self.c1(6)
        except error.ValueConstraintError:
            assert 0, 'constraint check fails'
    def testBadVal(self):
        try:
            self.c1(2)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint check fails'
# Constraints derivations
class DirectDerivationTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.SingleValueConstraint(5)
        self.c2 = constraint.ConstraintsUnion(
            self.c1, constraint.ValueRangeConstraint(1, 3)
            )
    def testGoodVal(self):
        assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
        assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
    def testBadVal(self):
        assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
        assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
class IndirectDerivationTestCase(unittest.TestCase):
    def setUp(self):
        self.c1 = constraint.ConstraintsIntersection(
            constraint.ValueRangeConstraint(1, 30)
            )
        self.c2 = constraint.ConstraintsIntersection(
            self.c1, constraint.ValueRangeConstraint(1, 20)
            )
        self.c2 = constraint.ConstraintsIntersection(
            self.c2, constraint.ValueRangeConstraint(1, 10)
            )
    def testGoodVal(self):
        assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
        assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
    def testBadVal(self):
        assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
        assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
        
if __name__ == '__main__': unittest.main()
# how to apply size constraints to constructed types?
 | 
	bsd-3-clause | 4,188,259,075,324,585,500 | 30.235714 | 83 | 0.601418 | false | 
| 
	pschmitt/home-assistant | 
	homeassistant/components/tof/sensor.py | 
	11 | 
	3309 | 
	"""Platform for Time of Flight sensor VL53L1X from STMicroelectronics."""
import asyncio
from functools import partial
import logging
from VL53L1X2 import VL53L1X  # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
LENGTH_MILLIMETERS = "mm"
CONF_I2C_ADDRESS = "i2c_address"
CONF_I2C_BUS = "i2c_bus"
CONF_XSHUT = "xshut"
DEFAULT_NAME = "VL53L1X"
DEFAULT_I2C_ADDRESS = 0x29
DEFAULT_I2C_BUS = 1
DEFAULT_XSHUT = 16
DEFAULT_RANGE = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
        vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int),
        vol.Optional(CONF_XSHUT, default=DEFAULT_XSHUT): cv.positive_int,
    }
)
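# A minimal configuration.yaml sketch matching the schema above; the values
# shown are simply the defaults declared in this module:
#
#   sensor:
#     - platform: tof
#       name: "VL53L1X"
#       i2c_address: 0x29
#       i2c_bus: 1
#       xshut: 16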
def init_tof_0(xshut, sensor):
    """XSHUT port LOW resets the device."""
    sensor.open()
    rpi_gpio.setup_output(xshut)
    rpi_gpio.write_output(xshut, 0)
def init_tof_1(xshut):
    """XSHUT port HIGH enables the device."""
    rpi_gpio.setup_output(xshut)
    rpi_gpio.write_output(xshut, 1)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Reset and initialize the VL53L1X ToF Sensor from STMicroelectronics."""
    name = config.get(CONF_NAME)
    bus_number = config.get(CONF_I2C_BUS)
    i2c_address = config.get(CONF_I2C_ADDRESS)
    unit = LENGTH_MILLIMETERS
    xshut = config.get(CONF_XSHUT)
    sensor = await hass.async_add_executor_job(partial(VL53L1X, bus_number))
    await hass.async_add_executor_job(init_tof_0, xshut, sensor)
    await asyncio.sleep(0.01)
    await hass.async_add_executor_job(init_tof_1, xshut)
    await asyncio.sleep(0.01)
    dev = [VL53L1XSensor(sensor, name, unit, i2c_address)]
    async_add_entities(dev, True)
class VL53L1XSensor(Entity):
    """Implementation of VL53L1X sensor."""
    def __init__(self, vl53l1x_sensor, name, unit, i2c_address):
        """Initialize the sensor."""
        self._name = name
        self._unit_of_measurement = unit
        self.vl53l1x_sensor = vl53l1x_sensor
        self.i2c_address = i2c_address
        self._state = None
        self.init = True
    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self) -> int:
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self) -> str:
        """Return the unit of measurement."""
        return self._unit_of_measurement
    def update(self):
        """Get the latest measurement and update state."""
        if self.init:
            self.vl53l1x_sensor.add_sensor(self.i2c_address, self.i2c_address)
            self.init = False
        self.vl53l1x_sensor.start_ranging(self.i2c_address, DEFAULT_RANGE)
        self.vl53l1x_sensor.update(self.i2c_address)
        self.vl53l1x_sensor.stop_ranging(self.i2c_address)
        self._state = self.vl53l1x_sensor.distance
 | 
	apache-2.0 | -2,562,695,181,623,962,600 | 29.638889 | 86 | 0.682079 | false | 
| 
	claneys/shinken | 
	test/test_servicetpl_no_hostname.py | 
	18 | 
	2049 | 
	#!/usr/bin/env python
# Copyright (C) 2009-2014:
#    Gabes Jean, [email protected]
#    Gerhard Lausser, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestServiceTplNoHostname(ShinkenTest):
    def setUp(self):
        self.setup_with_file('etc/shinken_servicetpl_no_hostname.cfg')
    def test_dummy(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
        self.assertEqual('UP', host.state)
        self.assertEqual('HARD', host.state_type)
if __name__ == '__main__':
    unittest.main()
 | 
	agpl-3.0 | -1,834,636,403,029,674,500 | 36.254545 | 134 | 0.666179 | false | 
| 
	hovsepm/AutoRest | 
	src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyDateTime/autorestdatetimetestservice/auto_rest_date_time_test_service.py | 
	3 | 
	2116 | 
	# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.datetime_model_operations import DatetimeModelOperations
from . import models
class AutoRestDateTimeTestServiceConfiguration(Configuration):
    """Configuration for AutoRestDateTimeTestService
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param str base_url: Service URL
    """
    def __init__(
            self, base_url=None):
        if not base_url:
            base_url = 'https://localhost'
        super(AutoRestDateTimeTestServiceConfiguration, self).__init__(base_url)
        self.add_user_agent('autorestdatetimetestservice/{}'.format(VERSION))
class AutoRestDateTimeTestService(object):
    """Test Infrastructure for AutoRest
    :ivar config: Configuration for client.
    :vartype config: AutoRestDateTimeTestServiceConfiguration
    :ivar datetime_model: DatetimeModel operations
    :vartype datetime_model: .operations.DatetimeModelOperations
    :param str base_url: Service URL
    """
    def __init__(
            self, base_url=None):
        self.config = AutoRestDateTimeTestServiceConfiguration(base_url)
        self._client = ServiceClient(None, self.config)
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self.datetime_model = DatetimeModelOperations(
            self._client, self.config, self._serialize, self._deserialize)
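# A minimal construction sketch for the generated client; the base URL is a
# hypothetical local test endpoint, and operations are reached through the
# datetime_model attribute initialised above.
def _example_client():
    client = AutoRestDateTimeTestService(base_url='http://localhost:3000')
    return client.datetime_model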
 | 
	mit | -620,560,233,606,112,000 | 33.688525 | 89 | 0.669187 | false | 
| 
	LibreSoftTeam/2016-uml-miner | 
	phase3-outputs/scripts/umlfiles2table_images.py | 
	1 | 
	2885 | 
	#!/usr/bin/python3
import csv
import pymysql
singleList = []
multipleList = []
# Connect to the database
connection = pymysql.connect(
                host='localhost',
                user='operator',
                passwd='operator',
                db='chunk4_images',
                charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor)
with open('updated4_images.csv', 'r') as csvfile:
    for myupdatedCSV in csv.reader(csvfile):
        if myupdatedCSV[3] == "UML":
            updatedCSV = myupdatedCSV[2].split('/')
            repo = updatedCSV[3] + "/" + updatedCSV[4]
            fileurl = myupdatedCSV[2]
            filename = fileurl.split('/')[-1]
            filepath = '/'.join(fileurl.split('/')[6:])
            if 'https://raw.githubusercontent.com/' not in fileurl:
                continue
            # Get repo id from database
            cursor = connection.cursor()
            sql = 'SELECT id FROM repositories WHERE uri="'
            sql += 'https://github.com/{0}"'.format(repo)
    #        print(sql)
            cursor.execute(sql)
            result = cursor.fetchone()
            try:
                repo_id = result['id']
    #            print(repo_id)
            except:
                #print("# Error", result, repo)
                continue
            # Get file id from database
            sql = 'SELECT id FROM files WHERE repository_id={0} and file_name="{1}"'.format(repo_id, filename)
    #        print(sql)
            cursor.execute(sql)
            if cursor.rowcount == 1:
                result = cursor.fetchone()
                file_id = result['id']
                singleList.append((file_id, repo_id, fileurl.replace("'", "\\'"), filepath.replace("'", "\\'")))
            else:
                result = cursor.fetchall()
    #            print("Warning:", result, filepath)
                found = 0
                for file in result:
                    sql = 'SELECT file_path from file_links WHERE file_id={0}'.format(file['id'])
    #                print(sql)
                    cursor.execute(sql)
                    result = cursor.fetchone()
                    db_path = result['file_path']
                    if db_path == filepath:
                        singleList.append((file['id'], repo_id, fileurl.replace("'", "\\'"), filepath.replace("'", "\\'")))
                        found = 1
                        break
                #if not found:
                    #print("# ERROR:", filepath, "not found")
connection.close()
# Write data into database
create = """
USE chunk4_images;
CREATE TABLE uml_files (
  id int,
  repository_id int,
  file_url VARCHAR(255),
  file_path VARCHAR(255)
);
"""
print(create)
for entry in singleList:
    print("INSERT INTO uml_files (id, repository_id, file_url, file_path) VALUES ({0}, {1}, '{2}', '{3}');".format(*entry))
 | 
	gpl-3.0 | -7,699,399,729,735,828,000 | 32.16092 | 123 | 0.49844 | false | 
| 
	IBM/differential-privacy-library | 
	tests/mechanisms/test_ExponentialCategorical.py | 
	1 | 
	4894 | 
	import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import ExponentialCategorical
from diffprivlib.utils import global_seed
class TestExponential(TestCase):
    def setup_method(self, method):
        if method.__name__ .endswith("prob"):
            global_seed(314159)
        self.mech = ExponentialCategorical
    def teardown_method(self, method):
        del self.mech
    def test_class(self):
        from diffprivlib.mechanisms import DPMechanism
        self.assertTrue(issubclass(ExponentialCategorical, DPMechanism))
    def test_inf_epsilon(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        ]
        mech = self.mech(epsilon=float("inf"), utility_list=utility_list)
        # print(_mech.randomise("A"))
        for i in range(1000):
            self.assertEqual(mech.randomise("A"), "A")
    def test_nonzero_delta(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        ]
        mech = self.mech(epsilon=1, utility_list=utility_list)
        mech.delta = 0.1
        with self.assertRaises(ValueError):
            mech.randomise("A")
    def test_non_string_hierarchy(self):
        utility_list = [
            ["A", "B", 1],
            ["A", 2, 2],
            ["B", 2, 2]
        ]
        with self.assertRaises(TypeError):
            self.mech(epsilon=1, utility_list=utility_list)
    def test_missing_utilities(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2]
        ]
        with self.assertRaises(ValueError):
            self.mech(epsilon=1, utility_list=utility_list)
    def test_wrong_utilities(self):
        utility_list = (
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        )
        with self.assertRaises(TypeError):
            self.mech(epsilon=1, utility_list=utility_list)
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", "2"]
        ]
        with self.assertRaises(TypeError):
            self.mech(epsilon=1, utility_list=utility_list)
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", -2]
        ]
        with self.assertRaises(ValueError):
            self.mech(epsilon=1, utility_list=utility_list)
    def test_non_string_input(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        ]
        mech = self.mech(epsilon=1, utility_list=utility_list)
        with self.assertRaises(TypeError):
            mech.randomise(2)
    def test_outside_domain(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        ]
        mech = self.mech(epsilon=1, utility_list=utility_list)
        with self.assertRaises(ValueError):
            mech.randomise("D")
    def test_get_utility_list(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["C", "B", 2]
        ]
        mech = self.mech(epsilon=1, utility_list=utility_list)
        _utility_list = mech.utility_list
        self.assertEqual(len(_utility_list), len(utility_list))
    def test_self_in_utility(self):
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2],
            ["A", "A", 5]
        ]
        mech = self.mech(epsilon=1, utility_list=utility_list)
        _utility_list = mech.utility_list
        self.assertEqual(len(_utility_list) + 1, len(utility_list))
        self.assertEqual(mech._get_utility("A", "A"), 0)
    def test_distrib_prob(self):
        epsilon = np.log(2)
        runs = 20000
        utility_list = [
            ["A", "B", 1],
            ["A", "C", 2],
            ["B", "C", 2]
        ]
        mech = self.mech(epsilon=epsilon, utility_list=utility_list)
        count = [0, 0, 0]
        for i in range(runs):
            val = mech.randomise("A")
            if val == "A":
                count[0] += 1
            elif val == "B":
                count[1] += 1
            elif val == "C":
                count[2] += 1
        # print("A: %d, B: %d, C: %d" % (count[0], count[1], count[2]))
        self.assertLessEqual(count[0] / runs, np.exp(epsilon) * count[2] / runs + 0.05)
        self.assertAlmostEqual(count[0] / count[1], count[1] / count[2], delta=0.15)
    def test_repr(self):
        repr_ = repr(self.mech(epsilon=1, utility_list=[]))
        self.assertIn(".ExponentialCategorical(", repr_)
    def test_bias(self):
        self.assertRaises(NotImplementedError, self.mech(epsilon=1, utility_list=[]).bias, 0)
    def test_variance(self):
        self.assertRaises(NotImplementedError, self.mech(epsilon=1, utility_list=[]).variance, 0)
 | 
	mit | -5,557,917,907,070,474,000 | 28.305389 | 97 | 0.492848 | false | 
| 
	lewisc/spark-tk | 
	python/sparktk/models/timeseries/arx.py | 
	7 | 
	16906 | 
	# vim: set encoding=utf-8
#  Copyright (c) 2016 Intel Corporation 
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
"""
ARX (autoregressive exogenous) Model
"""
from sparktk.loggers import log_load; log_load(__name__); del log_load
from sparktk import TkContext
from sparktk.propobj import PropertiesObject
__all__ = ["train", "load", "ArxModel"]
def train(frame, ts_column, x_columns, y_max_lag, x_max_lag, no_intercept=False):
    """
    Creates an ARX model by training on the given frame. Fits an autoregressive model with
    additional exogenous variables.
    Parameters
    ----------
    :param frame: (Frame) Frame used for training
    :param ts_column: (str) Name of the column that contains the time series values.
    :param x_columns: (List(str)) Names of the column(s) that contain the values of exogenous regressors.
    :param y_max_lag: (int) The maximum lag order for the dependent (time series) variable.
    :param x_max_lag: (int) The maximum lag order for exogenous variables.
    :param no_intercept: (bool) A boolean flag indicating if the intercept should be dropped. Default is false.
    :return: (ArxModel) Trained ARX model
    Notes
    -----
    1.  Dataset being trained must be small enough to be worked with on a single node.
    2.  If the specified set of exogenous variables is not invertible, an exception is
        thrown stating that the "matrix is singular".  This happens when there are
        certain patterns in the dataset or columns of all zeros.  In order to work
        around the singular matrix issue, try selecting a different set of columns for
        exogenous variables, or use a different time window for training.
    """
    # check parameter/types
    if not isinstance(ts_column, basestring):
        raise TypeError("'ts_column' should be a string (name of the column that has the timeseries value).")
    if not isinstance(x_columns, list) or not all(isinstance(c, str) for c in x_columns):
        raise TypeError("'x_columns' should be a list of strings (names of the exogenous columns).")
    elif len(x_columns) <= 0:
        raise ValueError("'x_columns' should not be empty.")
    if not isinstance(x_max_lag, int):
        raise TypeError("'x_max_lag' should be an integer.")
    if not isinstance(y_max_lag, int):
        raise TypeError("'y_max_lag' should be an integer.")
    if not isinstance(no_intercept, bool):
        raise TypeError("'no_intercept' should be a boolean.")
    tc = frame._tc
    _scala_obj = get_scala_obj(tc)
    scala_x_columns = tc.jutils.convert.to_scala_vector_string(x_columns)
    scala_model = _scala_obj.train(frame._scala, ts_column, scala_x_columns, y_max_lag, x_max_lag, no_intercept)
    return ArxModel(tc, scala_model)
def load(path, tc=TkContext.implicit):
    """load ArxModel from given path"""
    TkContext.validate(tc)
    return tc.load(path, ArxModel)
def get_scala_obj(tc):
    """Gets reference to the ArxModel scala object"""
    return tc.sc._jvm.org.trustedanalytics.sparktk.models.timeseries.arx.ArxModel
class ArxModel(PropertiesObject):
    """
    A trained ARX model.
    
    Example
    -------
    
    Consider the following model trained and tested on the sample data set in the frame *frame*.
    The frame has a snippet of air quality data from:
    https://archive.ics.uci.edu/ml/datasets/Air+Quality.
    Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
    Irvine, CA: University of California, School of Information and Computer Science.
    
    <hide>
        
        >>> schema = [('Date', str),('Time', str),('CO_GT', float),('PT08_S1_CO', int),('NMHC_GT', int),
        ...           ('C6H6_GT', float),('PT08_S2_NMHC', int),('NOx_GT', int),('PT08_S3_NOx', int),('NO2_GT', int),
        ...           ('PT08_S4_NO2', int),('PT08_S5_O3_', int),('T', float),('RH', float),('AH', float)]
        >>> frame = tc.frame.create([["10/03/2004","18.00.00",2.6,1360,150,11.9,1046,166,1056,113,1692,1268,13.6,48.9,0.7578],
        ...                          ["10/03/2004","19.00.00",2,1292,112,9.4,955,103,1174,92,1559,972,13.3,47.7,0.7255],
        ...                          ["10/03/2004","20.00.00",2.2,1402,88,9.0,939,131,1140,114,1555,1074,11.9,54.0,0.7502],
        ...                          ["10/03/2004","21.00.00",2.2,1376,80,9.2,948,172,1092,122,1584,1203,11.0,60.0,0.7867],
        ...                          ["10/03/2004","22.00.00",1.6,1272,51,6.5,836,131,1205,116,1490,1110,11.2,59.6,0.7888],
        ...                          ["10/03/2004","23.00.00",1.2,1197,38,4.7,750,89,1337,96,1393,949,11.2,59.2,0.7848],
        ...                          ["11/03/2004","00.00.00",1.2,1185,31,3.6,690,62,1462,77,1333,733,11.3,56.8,0.7603],
        ...                          ["11/03/2004","01.00.00",1,1136,31,3.3,672,62,1453,76,1333,730,10.7,60.0,0.7702],
        ...                          ["11/03/2004","02.00.00",0.9,1094,24,2.3,609,45,1579,60,1276,620,10.7,59.7,0.7648],
        ...                          ["11/03/2004","03.00.00",0.6,1010,19,1.7,561,-200,1705,-200,1235,501,10.3,60.2,0.7517],
        ...                          ["11/03/2004","04.00.00",-200,1011,14,1.3,527,21,1818,34,1197,445,10.1,60.5,0.7465],
        ...                          ["11/03/2004","05.00.00",0.7,1066,8,1.1,512,16,1918,28,1182,422,11.0,56.2,0.7366],
        ...                          ["11/03/2004","06.00.00",0.7,1052,16,1.6,553,34,1738,48,1221,472,10.5,58.1,0.7353],
        ...                          ["11/03/2004","07.00.00",1.1,1144,29,3.2,667,98,1490,82,1339,730,10.2,59.6,0.7417],
        ...                          ["11/03/2004","08.00.00",2,1333,64,8.0,900,174,1136,112,1517,1102,10.8,57.4,0.7408],
        ...                          ["11/03/2004","09.00.00",2.2,1351,87,9.5,960,129,1079,101,1583,1028,10.5,60.6,0.7691],
        ...                          ["11/03/2004","10.00.00",1.7,1233,77,6.3,827,112,1218,98,1446,860,10.8,58.4,0.7552],
        ...                          ["11/03/2004","11.00.00",1.5,1179,43,5.0,762,95,1328,92,1362,671,10.5,57.9,0.7352],
        ...                          ["11/03/2004","12.00.00",1.6,1236,61,5.2,774,104,1301,95,1401,664,9.5,66.8,0.7951],
        ...                          ["11/03/2004","13.00.00",1.9,1286,63,7.3,869,146,1162,112,1537,799,8.3,76.4,0.8393],
        ...                          ["11/03/2004","14.00.00",2.9,1371,164,11.5,1034,207,983,128,1730,1037,8.0,81.1,0.8736],
        ...                          ["11/03/2004","15.00.00",2.2,1310,79,8.8,933,184,1082,126,1647,946,8.3,79.8,0.8778],
        ...                          ["11/03/2004","16.00.00",2.2,1292,95,8.3,912,193,1103,131,1591,957,9.7,71.2,0.8569],
        ...                          ["11/03/2004","17.00.00",2.9,1383,150,11.2,1020,243,1008,135,1719,1104,9.8,67.6,0.8185]],
        ...                          schema=schema, validate_schema=True)
        -etc-
    </hide>
        >>> frame.inspect()
        [#]  Date        Time      CO_GT  PT08_S1_CO  NMHC_GT  C6H6_GT  PT08_S2_NMHC
        ============================================================================
        [0]  10/03/2004  18.00.00    2.6        1360      150     11.9          1046
        [1]  10/03/2004  19.00.00    2.0        1292      112      9.4           955
        [2]  10/03/2004  20.00.00    2.2        1402       88      9.0           939
        [3]  10/03/2004  21.00.00    2.2        1376       80      9.2           948
        [4]  10/03/2004  22.00.00    1.6        1272       51      6.5           836
        [5]  10/03/2004  23.00.00    1.2        1197       38      4.7           750
        [6]  11/03/2004  00.00.00    1.2        1185       31      3.6           690
        [7]  11/03/2004  01.00.00    1.0        1136       31      3.3           672
        [8]  11/03/2004  02.00.00    0.9        1094       24      2.3           609
        [9]  11/03/2004  03.00.00    0.6        1010       19      1.7           561
        <BLANKLINE>
        [#]  NOx_GT  PT08_S3_NOx  NO2_GT  PT08_S4_NO2  PT08_S5_O3_  T     RH    AH
        ==============================================================================
        [0]     166         1056     113         1692         1268  13.6  48.9  0.7578
        [1]     103         1174      92         1559          972  13.3  47.7  0.7255
        [2]     131         1140     114         1555         1074  11.9  54.0  0.7502
        [3]     172         1092     122         1584         1203  11.0  60.0  0.7867
        [4]     131         1205     116         1490         1110  11.2  59.6  0.7888
        [5]      89         1337      96         1393          949  11.2  59.2  0.7848
        [6]      62         1462      77         1333          733  11.3  56.8  0.7603
        [7]      62         1453      76         1333          730  10.7  60.0  0.7702
        [8]      45         1579      60         1276          620  10.7  59.7  0.7648
        [9]    -200         1705    -200         1235          501  10.3  60.2  0.7517
    We will be using the column "T" (temperature) as our time series value:
        >>> y = "T"
    The sensor values will be used as our exogenous variables:
        >>> x = ['CO_GT','PT08_S1_CO','NMHC_GT','C6H6_GT','PT08_S2_NMHC','NOx_GT','PT08_S3_NOx','NO2_GT','PT08_S4_NO2','PT08_S5_O3_']
    Train the model and then take a look at the model properties and coefficients:
        >>> model = tc.models.timeseries.arx.train(frame, y, x, 0, 0, True)
        <progress>
        >>> model
        c            = 0.0
        coefficients = [0.005567992923907625, -0.010969068059453009, 0.012556586798371176, -0.39792503380811506, 0.04289162879826746, -0.012253952164677924, 0.01192148525581035, 0.014100699808650077, -0.021091473795935345, 0.007622676727420039]
        no_intercept = True
        x_max_lag    = 0
        y_max_lag    = 0
    In this example, we will call predict using the same frame that was used for training, again specifying the name
    of the time series column and the names of the columns that contain exogenous regressors.
        >>> predicted_frame = model.predict(frame, y, x)
        <progress>
    The predicted_frame that's returned has a new column called *predicted_y*.  This column contains the predicted
    time series values.
        >>> predicted_frame.column_names
        [u'Date',
         u'Time',
         u'CO_GT',
         u'PT08_S1_CO',
         u'NMHC_GT',
         u'C6H6_GT',
         u'PT08_S2_NMHC',
         u'NOx_GT',
         u'PT08_S3_NOx',
         u'NO2_GT',
         u'PT08_S4_NO2',
         u'PT08_S5_O3_',
         u'T',
         u'RH',
         u'AH',
         u'predicted_y']
        >>> predicted_frame.inspect(n=15, columns=["T","predicted_y"])
        [##]  T     predicted_y
        =========================
        [0]   13.6   13.236459938
        [1]   13.3  13.0250130899
        [2]   11.9  11.4147282294
        [3]   11.0  11.3157457822
        [4]   11.2  11.3982074883
        [5]   11.2  11.7079198051
        [6]   11.3  10.7879916472
        [7]   10.7   10.527428478
        [8]   10.7  10.4439615476
        [9]   10.3   10.276662138
        [10]  10.1  10.0999996581
        [11]  11.0  11.2849327784
        [12]  10.5  10.5726885589
        [13]  10.2  10.1984619512
        [14]  10.8  11.0063774234
    The trained model can be saved to be used later:
        >>> model_path = "sandbox/savedArxModel"
        >>> model.save(model_path)
    The saved model can be loaded through the tk context and then used for forecasting values the same way
    that the original model was used.
        >>> loaded_model = tc.load(model_path)
        
        >>> predicted_frame = loaded_model.predict(frame, y, x)
        >>> predicted_frame.inspect(n=15,columns=["T","predicted_y"])
        [##]  T     predicted_y
        =========================
        [0]   13.6   13.236459938
        [1]   13.3  13.0250130899
        [2]   11.9  11.4147282294
        [3]   11.0  11.3157457822
        [4]   11.2  11.3982074883
        [5]   11.2  11.7079198051
        [6]   11.3  10.7879916472
        [7]   10.7   10.527428478
        [8]   10.7  10.4439615476
        [9]   10.3   10.276662138
        [10]  10.1  10.0999996581
        [11]  11.0  11.2849327784
        [12]  10.5  10.5726885589
        [13]  10.2  10.1984619512
        [14]  10.8  11.0063774234
    The trained model can also be exported to a .mar file, to be used with the scoring engine:
        >>> canonical_path = model.export_to_mar("sandbox/arx.mar")
    <hide>
        >>> import os
        >>> assert(os.path.isfile(canonical_path))
    </hide>
    """
    def __init__(self, tc, scala_model):
        self._tc = tc
        tc.jutils.validate_is_jvm_instance_of(scala_model, get_scala_obj(tc))
        self._scala = scala_model
    @staticmethod
    def _from_scala(tc, scala_model):
        """
        Load an ARX model
        :param tc: (TkContext) Active TkContext
        :param scala_model: (scala ArxModel) Scala model to load
        :return: (ArxModel) ArxModel object
        """
        return ArxModel(tc, scala_model)
    @property
    def y_max_lag(self):
        """
        The maximum lag order for the dependent (time series) values.
        """
        return self._scala.yMaxLag()
    @property
    def x_max_lag(self):
        """
        The maximum lag order for exogenous variables.
        """
        return self._scala.xMaxLag()
    @property
    def c(self):
        """
        An intercept term (zero if none desired), from the trained model.
        """
        return self._scala.c()
    @property
    def coefficients(self):
        """
        Coefficient values from the trained model.
        """
        return list(self._tc.jutils.convert.from_scala_seq(self._scala.coefficients()))
    @property
    def no_intercept(self):
        """
        A boolean flag indicating if the intercept should be dropped.
        """
        return self._scala.noIntercept()
    def predict(self, frame, ts_column, x_columns):
        """
        New frame with column of predicted y values
        Predict the time series values for a test frame, based on the specified x values.  Creates a new frame
        revision with the existing columns and a new predicted_y column.
        Parameters
        ----------
        :param frame: (Frame) Frame used for predicting the ts values
        :param ts_column: (str) Name of the time series column
        :param x_columns: (List[str]) Names of the column(s) that contain the values of the exogenous inputs.
        :return: (Frame) A new frame containing the original frame's columns and a column *predicted_y*
        """
        if not isinstance(frame, self._tc.frame.Frame):
            raise TypeError("'frame' parameter should be a spark-tk Frame object.")
        if not isinstance(ts_column, basestring):
            raise TypeError("'ts_column' parameter should be a string (name of the column that has the timeseries value).")
        if not isinstance(x_columns, list) or not all(isinstance(c, str) for c in x_columns):
            raise TypeError("'x_columns' parameter should be a list of strings (names of the exogenous columns).")
        elif len(x_columns) <= 0:
            raise ValueError("'x_columns' should not be empty.")
        scala_x_columns = self._tc.jutils.convert.to_scala_vector_string(x_columns)
        from sparktk.frame.frame import Frame
        return Frame(self._tc, self._scala.predict(frame._scala, ts_column, scala_x_columns))
    def save(self, path):
        """
        Save the trained model to the specified path.
        Parameters
        ----------
        :param path: (str) Path to save
        """
        self._scala.save(self._tc._scala_sc, path)
    def export_to_mar(self, path):
        """
        Exports the trained model as a model archive (.mar) to the specified path.
        Parameters
        ----------
        :param path: (str) Path to save the trained model
        :returns (str) Full path to the saved .mar file
        """
        if not isinstance(path, basestring):
            raise TypeError("path parameter must be a str, but received %s" % type(path))
        return self._scala.exportToMar(self._tc._scala_sc, path)
del PropertiesObject
 | 
	apache-2.0 | -6,667,347,977,254,630,000 | 43.401055 | 244 | 0.547243 | false | 
| 
	wederw/bitcoin | 
	qa/rpc-tests/mempool_spendcoinbase.py | 
	6 | 
	2762 | 
	#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# A spend of the coinbase transaction in block N can appear in block
# N+100... so it is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
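# Worked example with the numbers used below: run_test asserts a starting chain
# height of 200, so the coinbase from block 101 (200 - 100 + 1) matures in block 201
# and its spend is accepted into the mempool now, while the coinbase from block 102
# remains unspendable until one more block is mined.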
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
    def setup_network(self):
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False
    def create_tx(self, from_txid, to_address, amount):
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(signresult["complete"], True)
        return signresult["hex"]
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        node0_address = self.nodes[0].getnewaddress()
        # Coinbase at height chain_height-100+1 is ok in the mempool and should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
        spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
        # coinbase at height 102 should be too immature to spend
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        # ... and now height 102 can be spent:
        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
    MempoolSpendCoinbaseTest().main()
 | 
	mit | -586,170,797,680,164,400 | 39.028986 | 91 | 0.673787 | false | 
| 
	danilo-massa/honeyd | 
	webserver/htdocs/index.py | 
	2 | 
	1445 | 
	import honeyd
import time
import support
from htmltmpl import TemplateManager, TemplateProcessor
global counter
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_nocache()
self.end_headers()
# Compile or load already precompiled template.
template = TemplateManager().prepare(self.root+"/templates/index.tmpl")
tproc = TemplateProcessor(0)
# Process commands given to us
message = support.parse_query(self.query)
# Set the title.
tproc.set("title", "Honeyd Administration Interface")
# Simple visitor counter kept across requests
try:
    counter += 1
except NameError:
    counter = 1
greeting = ("Welcome to the Honeyd Administration Interface. "
            "You are visitor %d.<p>") % counter
content = support.interface_table()
content += "<p>" + support.stats_table(self.root) + "</p>\n"
content += "<p>" + support.status_connections(self.root, "tcp") + "</p>\n"
content += "<p>" + support.status_connections(self.root, "udp") + "</p>\n"
side_content = ("<div class=graphs>"
                "<img height=155 width=484 src=/graphs/traffic_hourly.gif><br>"
                "<img height=155 width=484 src=/graphs/traffic_daily.gif>"
                "</div>")
support.security_check(tproc)
if message:
    tproc.set("message", message)
tproc.set("greeting", greeting)
tproc.set("content", content)
tproc.set("side_content", side_content)
tproc.set("uptime", support.uptime())
# Print the processed template.
self.wfile.write(tproc.process(template))
 | 
	gpl-2.0 | 7,020,533,826,385,482,000 | 26.264151 | 79 | 0.689965 | false | 
| 
	lmregus/Portfolio | 
	python/design_patterns/env/lib/python3.7/site-packages/pip/_vendor/chardet/escsm.py | 
	289 | 
	10510 | 
	######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
HZ_CLS = (
1,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,0,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,4,0,5,2,0,  # 78 - 7f
1,1,1,1,1,1,1,1,  # 80 - 87
1,1,1,1,1,1,1,1,  # 88 - 8f
1,1,1,1,1,1,1,1,  # 90 - 97
1,1,1,1,1,1,1,1,  # 98 - 9f
1,1,1,1,1,1,1,1,  # a0 - a7
1,1,1,1,1,1,1,1,  # a8 - af
1,1,1,1,1,1,1,1,  # b0 - b7
1,1,1,1,1,1,1,1,  # b8 - bf
1,1,1,1,1,1,1,1,  # c0 - c7
1,1,1,1,1,1,1,1,  # c8 - cf
1,1,1,1,1,1,1,1,  # d0 - d7
1,1,1,1,1,1,1,1,  # d8 - df
1,1,1,1,1,1,1,1,  # e0 - e7
1,1,1,1,1,1,1,1,  # e8 - ef
1,1,1,1,1,1,1,1,  # f0 - f7
1,1,1,1,1,1,1,1,  # f8 - ff
)
HZ_ST = (
MachineState.START,MachineState.ERROR,     3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,     4,MachineState.ERROR,# 10-17
     5,MachineState.ERROR,     6,MachineState.ERROR,     5,     5,     4,MachineState.ERROR,# 18-1f
     4,MachineState.ERROR,     4,     4,     4,MachineState.ERROR,     4,MachineState.ERROR,# 20-27
     4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
)
HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
HZ_SM_MODEL = {'class_table': HZ_CLS,
               'class_factor': 6,
               'state_table': HZ_ST,
               'char_len_table': HZ_CHAR_LEN_TABLE,
               'name': "HZ-GB-2312",
               'language': 'Chinese'}
ISO2022CN_CLS = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,3,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,4,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022CN_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
     5,     6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
)
ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
                      'class_factor': 9,
                      'state_table': ISO2022CN_ST,
                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
                      'name': "ISO-2022-CN",
                      'language': 'Chinese'}
ISO2022JP_CLS = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,2,2,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,7,0,0,0,  # 20 - 27
3,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
6,0,4,0,8,0,0,0,  # 40 - 47
0,9,5,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022JP_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 20-27
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
)
ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
                      'class_factor': 10,
                      'state_table': ISO2022JP_ST,
                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
                      'name': "ISO-2022-JP",
                      'language': 'Japanese'}
ISO2022KR_CLS = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,3,0,0,0,  # 20 - 27
0,4,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,5,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022KR_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
)
ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
                      'class_factor': 6,
                      'state_table': ISO2022KR_ST,
                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
                      'name': "ISO-2022-KR",
                      'language': 'Korean'}
 | 
	mit | -308,592,456,976,034,500 | 41.723577 | 165 | 0.628069 | false | 
| 
	openvapour/ryu | 
	ryu/services/protocols/bgp/info_base/rtc.py | 
	52 | 
	2467 | 
	# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
 Defines data types and models required specifically for RTC support.
"""
import logging
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
from ryu.services.protocols.bgp.info_base.base import Path
from ryu.services.protocols.bgp.info_base.base import Table
LOG = logging.getLogger('bgpspeaker.info_base.rtc')
class RtcTable(Table):
    """Global table to store RT membership information.
    Uses `RtcDest` to store destination information for each known RT NLRI path.
    """
    ROUTE_FAMILY = RF_RTC_UC
    def __init__(self, core_service, signal_bus):
        Table.__init__(self, None, core_service, signal_bus)
    def _table_key(self, rtc_nlri):
        """Return a key that will uniquely identify this RT NLRI inside
        this table.
        """
        return str(rtc_nlri.origin_as) + ':' + rtc_nlri.route_target
    def _create_dest(self, nlri):
        return RtcDest(self, nlri)
    def __str__(self):
        return 'RtcTable(scope_id: %s, rf: %s)' % (self.scope_id,
                                                   self.route_family)
class RtcDest(Destination, NonVrfPathProcessingMixin):
    ROUTE_FAMILY = RF_RTC_UC
    def _new_best_path(self, new_best_path):
        NonVrfPathProcessingMixin._new_best_path(self, new_best_path)
    def _best_path_lost(self):
        NonVrfPathProcessingMixin._best_path_lost(self)
class RtcPath(Path):
    ROUTE_FAMILY = RF_RTC_UC
    def __init__(self, source, nlri, src_ver_num, pattrs=None,
                 nexthop='0.0.0.0', is_withdraw=False,
                 med_set_by_target_neighbor=False):
        Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
                      is_withdraw, med_set_by_target_neighbor)
 | 
	apache-2.0 | -6,789,287,685,204,760,000 | 32.794521 | 79 | 0.679773 | false | 
| 
	admcrae/tensorflow | 
	tensorflow/contrib/learn/python/learn/ops/seq2seq_ops.py | 
	61 | 
	5870 | 
	# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops for Sequence to Sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
  """Returns predictions and loss for sequence of predictions.
  Args:
    decoding: List of Tensors with predictions.
    labels: List of Tensors with labels.
    sampling_decoding: Optional, list of Tensors with predictions to be used
      in sampling, e.g. they shouldn't have a dependency on outputs.
      If not provided, decoding is used.
    name: Operation name.
  Returns:
    Predictions and losses tensors.
  """
  with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
    predictions, xent_list = [], []
    for i, pred in enumerate(decoding):
      xent_list.append(nn.softmax_cross_entropy_with_logits(
          labels=labels[i], logits=pred,
          name="sequence_loss/xent_raw{0}".format(i)))
      if sampling_decoding:
        predictions.append(nn.softmax(sampling_decoding[i]))
      else:
        predictions.append(nn.softmax(pred))
    xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
    loss = math_ops.reduce_sum(xent, name="sequence_loss")
    return array_ops.stack(predictions, axis=1), loss
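# Minimal call sketch (shapes and names are illustrative): with `decoding` and
# `labels` as equal-length lists of [batch_size, num_classes] logit / one-hot
# tensors, e.g. produced by rnn_decoder below,
#   predictions, loss = sequence_classifier(decoding, labels)
# yields class probabilities stacked along axis 1 and the summed cross-entropy loss.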
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
  """Processes inputs for Sequence to Sequence models.
  Args:
    x: Input Tensor [batch_size, input_length, embed_dim].
    y: Output Tensor [batch_size, output_length, embed_dim].
    input_length: length of input x.
    output_length: length of output y.
    sentinel: optional first input to decoder and final output expected.
      If sentinel is not provided, zeros are used. Because y is not
      available at sampling time, the shape of the sentinel is inferred from x.
    name: Operation name.
  Returns:
    Encoder input from x, and decoder inputs and outputs from y.
  """
  with ops.name_scope(name, "seq2seq_inputs", [x, y]):
    in_x = array_ops.unstack(x, axis=1)
    y = array_ops.unstack(y, axis=1)
    if not sentinel:
      # Set to zeros of shape of y[0], using x for batch size.
      sentinel_shape = array_ops.stack(
          [array_ops.shape(x)[0], y[0].get_shape()[1]])
      sentinel = array_ops.zeros(sentinel_shape)
      sentinel.set_shape(y[0].get_shape())
    in_y = [sentinel] + y
    out_y = y + [sentinel]
    return in_x, in_y, out_y
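# Resulting decoder ordering, e.g. for output_length == 2 where y unstacks to
# [y0, y1]: in_y == [sentinel, y0, y1] feeds the decoder, and out_y == [y0, y1,
# sentinel] is the target sequence the decoder is trained to emit.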
def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
  """RNN Decoder that creates training and sampling sub-graphs.
  Args:
    decoder_inputs: Inputs for decoder, list of tensors.
      This is used only in the training sub-graph.
    initial_state: Initial state for the decoder.
    cell: RNN cell to use for decoder.
    scope: Scope to use, if None new will be produced.
  Returns:
    List of tensors for outputs and states for training and sampling sub-graphs.
  """
  with vs.variable_scope(scope or "dnn_decoder"):
    states, sampling_states = [initial_state], [initial_state]
    outputs, sampling_outputs = [], []
    with ops.name_scope("training", values=[decoder_inputs, initial_state]):
      for i, inp in enumerate(decoder_inputs):
        if i > 0:
          vs.get_variable_scope().reuse_variables()
        output, new_state = cell(inp, states[-1])
        outputs.append(output)
        states.append(new_state)
    with ops.name_scope("sampling", values=[initial_state]):
      for i, _ in enumerate(decoder_inputs):
        if i == 0:
          sampling_outputs.append(outputs[i])
          sampling_states.append(states[i])
        else:
          sampling_output, sampling_state = cell(sampling_outputs[-1],
                                                 sampling_states[-1])
          sampling_outputs.append(sampling_output)
          sampling_states.append(sampling_state)
  return outputs, states, sampling_outputs, sampling_states
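# Note on the two sub-graphs built above: the training branch feeds the ground-truth
# decoder_inputs at each step, while the sampling branch shares the same cell weights
# but feeds back its own previous output, matching how decoding works at prediction
# time when no ground truth is available.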
def rnn_seq2seq(encoder_inputs,
                decoder_inputs,
                encoder_cell,
                decoder_cell=None,
                dtype=dtypes.float32,
                scope=None):
  """RNN Sequence to Sequence model.
  Args:
    encoder_inputs: List of tensors, inputs for encoder.
    decoder_inputs: List of tensors, inputs for decoder.
    encoder_cell: RNN cell to use for encoder.
    decoder_cell: RNN cell to use for decoder, if None encoder_cell is used.
    dtype: Type to initialize encoder state with.
    scope: Scope to use, if None new will be produced.
  Returns:
    List of tensors for outputs and states for training and sampling sub-graphs.
  """
  with vs.variable_scope(scope or "rnn_seq2seq"):
    _, last_enc_state = rnn.static_rnn(
        encoder_cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or
                       encoder_cell)
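# Minimal wiring sketch (cell size and sequence lengths are illustrative):
#   cell = rnn.GRUCell(8)
#   enc_in = [array_ops.placeholder(dtypes.float32, [None, 8]) for _ in range(5)]
#   dec_in = [array_ops.placeholder(dtypes.float32, [None, 8]) for _ in range(3)]
#   outputs, states, sampling_out, sampling_states = rnn_seq2seq(enc_in, dec_in, cell)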
 | 
	apache-2.0 | -5,773,286,829,979,748,000 | 38.395973 | 80 | 0.671891 | false | 
| 
	pyblish/pyblish-starter | 
	pyblish_starter/vendor/jsonschema/_reflect.py | 
	7 | 
	5008 | 
	# -*- test-case-name: twisted.test.test_reflect -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standardized versions of various cool and/or strange things that you can do
with Python's reflection capabilities.
"""
import sys
from .compat import PY3
class _NoModuleFound(Exception):
    """
    No module was found because none exists.
    """
class InvalidName(ValueError):
    """
    The given name is not a dot-separated list of Python objects.
    """
class ModuleNotFound(InvalidName):
    """
    The module associated with the given name doesn't exist and it can't be
    imported.
    """
class ObjectNotFound(InvalidName):
    """
    The object associated with the given name doesn't exist and it can't be
    imported.
    """
if PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    exec("""def reraise(exception, traceback):
        raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
def _importAndCheckStack(importName):
    """
    Import the given name as a module, then walk the stack to determine whether
    the failure was the module not existing, or some code in the module (for
    example a dependent import) failing.  This can be helpful to determine
    whether any actual application code was run.  For example, to distinguish
    administrative error (entering the wrong module name) from programmer
    error (writing buggy code in a module that fails to import).
    @param importName: The name of the module to import.
    @type importName: C{str}
    @raise Exception: if something bad happens.  This can be any type of
        exception, since nobody knows what loading some arbitrary code might
        do.
    @raise _NoModuleFound: if no module was found.
    """
    try:
        return __import__(importName)
    except ImportError:
        excType, excValue, excTraceback = sys.exc_info()
        while excTraceback:
            execName = excTraceback.tb_frame.f_globals["__name__"]
            # in Python 2 execName is None when an ImportError is encountered,
            # where in Python 3 execName is equal to the importName.
            if execName is None or execName == importName:
                reraise(excValue, excTraceback)
            excTraceback = excTraceback.tb_next
        raise _NoModuleFound()
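# For example, _importAndCheckStack("no_such_module") ends in _NoModuleFound because
# no traceback frame belongs to the missing module itself, whereas importing a module
# that exists but whose body raises ImportError re-raises that original error.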
def namedAny(name):
    """
    Retrieve a Python object by its fully qualified name from the global Python
    module namespace.  The first part of the name, that describes a module,
    will be discovered and imported.  Each subsequent part of the name is
    treated as the name of an attribute of the object specified by all of the
    name which came before it.  For example, the fully-qualified name of this
    object is 'twisted.python.reflect.namedAny'.
    @type name: L{str}
    @param name: The name of the object to return.
    @raise InvalidName: If the name is an empty string, starts or ends with
        a '.', or is otherwise syntactically incorrect.
    @raise ModuleNotFound: If the name is syntactically correct but the
        module it specifies cannot be imported because it does not appear to
        exist.
    @raise ObjectNotFound: If the name is syntactically correct, includes at
        least one '.', but the module it specifies cannot be imported because
        it does not appear to exist.
    @raise AttributeError: If an attribute of an object along the way cannot be
        accessed, or a module along the way is not found.
    @return: the Python object identified by 'name'.
    """
    if not name:
        raise InvalidName('Empty module name')
    names = name.split('.')
    # if the name starts or ends with a '.' or contains '..', the __import__
    # will raise an 'Empty module name' error. This will provide a better error
    # message.
    if '' in names:
        raise InvalidName(
            "name must be a string giving a '.'-separated list of Python "
            "identifiers, not %r" % (name,))
    topLevelPackage = None
    moduleNames = names[:]
    while not topLevelPackage:
        if moduleNames:
            trialname = '.'.join(moduleNames)
            try:
                topLevelPackage = _importAndCheckStack(trialname)
            except _NoModuleFound:
                moduleNames.pop()
        else:
            if len(names) == 1:
                raise ModuleNotFound("No module named %r" % (name,))
            else:
                raise ObjectNotFound('%r does not name an object' % (name,))
    obj = topLevelPackage
    for n in names[1:]:
        obj = getattr(obj, n)
    return obj
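# Example: namedAny("os.path.join") imports "os", then resolves the "path" and
# "join" attributes in turn and returns the os.path.join function.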
 | 
	mit | 2,590,815,536,556,024,300 | 32.386667 | 79 | 0.665136 | false | 
| 
	gdimitris/ChessPuzzler | 
	Virtual_Environment/lib/python2.7/site-packages/requests/packages/chardet/big5prober.py | 
	2931 | 
	1684 | 
	######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        return "Big5"
 | 
	mit | -4,230,622,644,537,755,000 | 39.095238 | 69 | 0.718527 | false | 
| 
	nive-cms/nive | 
	nive/userdb/tests/test_app.py | 
	1 | 
	7136 | 
	# -*- coding: utf-8 -*-
import time
import unittest
from nive.security import AdminUser, UserFound
from db_app import *
class ObjectTest(unittest.TestCase):
    def setUp(self):
        self.app = app()
    def tearDown(self):
        self.app.Close()
        pass
    
    def test_add(self):
        a=self.app
        root=a.root()
        user = User("test")
        # root
        root.DeleteUser("user1")
        root.DeleteUser("user2")
        root.DeleteUser("user3")
        data = {"password": "11111", "surname": "surname", "lastname": "lastname", "organistion": "organisation"}
        
        data["name"] = "user1"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assert_(o,r)
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assertFalse(o,r)
        data["name"] = "user2"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=1, mail=None, notify=False, groups="group:author", currentUser=user)
        self.assert_(o,r)
        data["name"] = "user3"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=0, generatePW=1, mail=None, notify=False, groups="group:editor", currentUser=user)
        self.assert_(o,r)
        self.assert_("group:editor" in o.data.groups, o.data.groups)
        self.assert_(o.data.password != "11111")
        self.assertFalse(o.meta.pool_state)
        
        root.MailUserPass(email = "user1", mailtmpl = None)
        root.MailUserPass(email = "[email protected]", mailtmpl = None, createNewPasswd=False)
        root.MailUserPass(email = "[email protected]", mailtmpl = None)
        
        self.assert_(root.GetUserByName("user2", activeOnly=1))
        self.assert_(root.GetUserByID(o.id, activeOnly=0))
        self.assert_(root.GetUserByMail("[email protected]", activeOnly=1))
        
        self.assert_(root.LookupUser(name="user1", id=None, activeOnly=1))
        self.assertFalse(root.LookupUser(name="user3", id=None, activeOnly=1))
        self.assert_(root.LookupUser(name="user3", id=None, activeOnly=0))
        
        self.assert_(len(root.GetUserInfos(["user1", "user2"], fields=["name", "email", "title"], activeOnly=True)))
        self.assert_(len(root.GetUsersWithGroup("group:author", fields=["name"], activeOnly=True)))
        self.assert_(len(root.GetUsersWithGroup("group:editor", fields=["name"], activeOnly=False)))
        self.assertFalse(len(root.GetUsersWithGroup("group:editor", fields=["name"], activeOnly=True)))
        self.assert_(len(root.GetUsers()))
        
        root.DeleteUser("user1")
        root.DeleteUser("user2")
        root.DeleteUser("user3")
        
    def test_login(self):
        a=self.app
        root=a.root()
        root.identityField=u"name"
        user = User("test")
        # root
        root.DeleteUser("user1")
        root.DeleteUser("user2")
        root.DeleteUser("user3")
        data = {"password": "11111", "surname": "surname", "lastname": "lastname", "organistion": "organisation"}
        
        data["name"] = "user1"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assert_(o,r)
        l,r = root.Login("user1", "11111", raiseUnauthorized = 0)
        self.assert_(l,r)
        self.assert_(root.Logout("user1"))
        l,r = root.Login("user1", "aaaaa", raiseUnauthorized = 0)
        self.assertFalse(l,r)
        l,r = root.Login("user1", "", raiseUnauthorized = 0)
        self.assertFalse(l,r)
        data["name"] = "user2"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=1, mail=None, notify=False, groups="", currentUser=user)
        self.assert_(o,r)
        l,r = root.Login("user2", o.data.password, raiseUnauthorized = 0)
        self.assertFalse(l,r)
        self.assert_(root.Logout("user1"))
        l,r = root.Login("user2", "11111", raiseUnauthorized = 0)
        self.assertFalse(l,r)
        data["name"] = "user3"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=0, generatePW=1, mail=None, notify=False, groups="group:author", currentUser=user)
        self.assert_(o,r)
        l,r = root.Login("user3", o.data.password, raiseUnauthorized = 0)
        self.assertFalse(l,r)
        self.assertFalse(root.Logout("user3"))
        
        root.DeleteUser("user1")
        root.DeleteUser("user2")
        root.DeleteUser("user3")
    def test_user(self):
        a=self.app
        root=a.root()
        user = User("test")
        # root
        root.DeleteUser("user1")
        data = {"password": "11111", "surname": "surname", "lastname": "lastname", "organistion": "organisation"}
        
        data["name"] = "user1"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assert_(o.SecureUpdate(data, user))
        self.assert_(o.UpdateGroups(["group:author"]))
        self.assert_(o.GetGroups()==("group:author",), o.GetGroups())
        self.assert_(o.AddGroup("group:editor", user))
        self.assert_(o.GetGroups()==("group:author","group:editor"), o.GetGroups())
        self.assert_(o.InGroups("group:editor"))
        self.assert_(o.InGroups("group:author"))
    
        self.assert_(o.ReadableName()=="surname lastname")
        root.DeleteUser("user1")
class AdminuserTest(unittest.TestCase):
    def setUp(self):
        self.app = app()
        self.app.configuration.unlock()
        self.app.configuration.admin = {"name":"admin", "password":"11111", "email":"[email protected]", "groups":("group:admin",)}
        self.app.configuration.loginByEmail = True
        self.app.configuration.lock()
        
    def tearDown(self):
        self.app.Close()
        pass
    
    def test_login(self):
        user = User("test")
        a=self.app
        root=a.root()
        root.identityField=u"name"
        root.DeleteUser("adminXXXXX")
        root.DeleteUser("admin")
        data = {"password": "11111", "surname": "surname", "lastname": "lastname", "organistion": "organisation"}
        data["name"] = "admin"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assertFalse(o,r)
        data["name"] = "adminXXXXX"
        data["email"] = "[email protected]"
        o,r = root.AddUser(data, activate=1, generatePW=0, mail=None, notify=False, groups="", currentUser=user)
        self.assertFalse(o,r)
        l,r = root.Login("admin", "11111", raiseUnauthorized = 0)
        self.assert_(l,r)
        self.assert_(root.Logout("admin"))
        l,r = root.Login("admin", "aaaaa", raiseUnauthorized = 0)
        self.assertFalse(l,r)
        l,r = root.Login("admin", "", raiseUnauthorized = 0)
        self.assertFalse(l,r)
        
 | 
	gpl-3.0 | -1,607,020,821,627,751,700 | 37.782609 | 127 | 0.591368 | false | 
| 
	SwagColoredKitteh/servo | 
	tests/wpt/web-platform-tests/tools/py/testing/root/test_py_imports.py | 
	163 | 
	1983 | 
	import py
import types
import sys
def checksubpackage(name):
    obj = getattr(py, name)
    if hasattr(obj, '__map__'): # isinstance(obj, Module):
        keys = dir(obj)
        assert len(keys) > 0
        print (obj.__map__)
        for name in list(obj.__map__):
            assert hasattr(obj, name), (obj, name)
def test_dir():
    for name in dir(py):
        if not name.startswith('_'):
            yield checksubpackage, name
def test_virtual_module_identity():
    from py import path as path1
    from py import path as path2
    assert path1 is path2
    from py.path import local as local1
    from py.path import local as local2
    assert local1 is local2
def test_importall():
    base = py._pydir
    nodirs = [
    ]
    if sys.version_info >= (3,0):
        nodirs.append(base.join('_code', '_assertionold.py'))
    else:
        nodirs.append(base.join('_code', '_assertionnew.py'))
    def recurse(p):
        return p.check(dotfile=0) and p.basename != "attic"
    for p in base.visit('*.py', recurse):
        if p.basename == '__init__.py':
            continue
        relpath = p.new(ext='').relto(base)
        if base.sep in relpath: # not py/*.py itself
            for x in nodirs:
                if p == x or p.relto(x):
                    break
            else:
                relpath = relpath.replace(base.sep, '.')
                modpath = 'py.%s' % relpath
                try:
                    check_import(modpath)
                except py.test.skip.Exception:
                    pass
def check_import(modpath):
    py.builtin.print_("checking import", modpath)
    assert __import__(modpath)
def test_all_resolves():
    seen = py.builtin.set([py])
    lastlength = None
    while len(seen) != lastlength:
        lastlength = len(seen)
        for item in py.builtin.frozenset(seen):
            for value in item.__dict__.values():
                if isinstance(value, type(py.test)):
                    seen.add(value)
 | 
	mpl-2.0 | -5,177,895,444,657,549,000 | 28.161765 | 61 | 0.548159 | false | 
| 
	rbenson/orgmode | 
	resolver/https.py | 
	2 | 
	2676 | 
	
import re
import sys
import subprocess
import sublime
from .abstract import AbstractRegexLinkResolver
try:
    import urllib.request, urllib.parse, urllib.error
except ImportError:
    import urllib
PATTERN_SETTING = 'orgmode.open_link.resolver.https.pattern'
PATTERN_DEFAULT = r'^(https):(?P<url>.+)$'
URL_SETTING = 'orgmode.open_link.resolver.https.url'
URL_DEFAULT = 'https:%s'
DEFAULT_OPEN_HTTP_LINK_COMMANDS = dict(
    darwin=['open'],
    win32=['cmd', '/C'],
    linux=['xdg-open'],
)
class Resolver(AbstractRegexLinkResolver):
    def __init__(self, view):
        super(Resolver, self).__init__(view)
        get = self.settings.get
        pattern = get(PATTERN_SETTING, PATTERN_DEFAULT)
        self.regex = re.compile(pattern)
        self.url = get(URL_SETTING, URL_DEFAULT)
        self.link_commands = self.settings.get(
            'orgmode.open_link.resolver.abstract.commands', DEFAULT_OPEN_HTTP_LINK_COMMANDS)
    def replace(self, match):
        return self.url % match.group('url')
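    # Illustrative note (editor's sketch, not part of the original resolver): with
    # the default PATTERN_DEFAULT and URL_DEFAULT above, a link such as
    # 'https://example.com/page' matches with group('url') == '//example.com/page',
    # so replace() simply reassembles 'https://example.com/page'.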
    def execute(self, content):
        command = self.get_link_command()
        if not command:
            sublime.error_message(
                'Could not get link opener command.\nNot yet supported.')
            return None
        # cmd.exe quote is needed, http://ss64.com/nt/syntax-esc.html
        # escape these: ^\  ^&  ^|  ^>  ^<  ^^
        if sys.platform == 'win32':
            content = content.replace("^", "^^")
            content = content.replace("&", "^&")
            content = content.replace("\\", "^\\")
            content = content.replace("|", "^|")
            content = content.replace("<", "^<")
            content = content.replace(">", "^>")
        if sys.version_info[0] < 3:
            content = content.encode(sys.getfilesystemencoding())
        if sys.platform != 'win32':
            cmd = command + [content]
        else:
            cmd = command + ['start ' + content]
        print('HTTP*****')
        print(repr(content), content)
        print(repr(cmd))
        print(cmd)
        sublime.status_message('Executing: %s' % cmd)
        if sys.platform != 'win32':
            process = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            process = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
        if stdout:
            stdout = str(stdout, sys.getfilesystemencoding())
            sublime.status_message(stdout)
        if stderr:
            stderr = str(stderr, sys.getfilesystemencoding())
            sublime.error_message(stderr)
 | 
	mit | -617,696,223,155,290,200 | 31.240964 | 92 | 0.58296 | false | 
| 
	geosolutions-it/geonode | 
	geonode/upload/tests/test_files.py | 
	11 | 
	1695 | 
	# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""unit tests for geonode.upload.files module"""
from geonode.tests.base import GeoNodeBaseTestSupport
from geonode.upload import files
class FilesTestCase(GeoNodeBaseTestSupport):
    def test_scan_hint_kml_ground_overlay(self):
        result = files.get_scan_hint(["kml", "other"])
        kml_file_type = files.get_type("KML Ground Overlay")
        self.assertEqual(result, kml_file_type.code)
    def test_scan_hint_kmz_ground_overlay(self):
        result = files.get_scan_hint(["kmz", "other"])
        self.assertEqual(result, "kmz")
    def test_get_type_non_existing_type(self):
        self.assertIsNone(files.get_type("fake"))
    def test_get_type_kml_ground_overlay(self):
        file_type = files.get_type("KML Ground Overlay")
        self.assertEqual(file_type.code, "kml-overlay")
        self.assertIn("kmz", file_type.aliases)
 | 
	gpl-3.0 | 5,208,154,752,840,141,000 | 36.666667 | 73 | 0.647788 | false | 
| 
	jfinkels/networkx | 
	networkx/generators/tests/test_expanders.py | 
	55 | 
	2497 | 
	# Copyright 2014 "cheebee7i".
# Copyright 2014 "alexbrc".
# Copyright 2014 Jeffrey Finkelstein <[email protected]>.
"""Unit tests for the :mod:`networkx.generators.expanders` module.
"""
try:
    import scipy
    is_scipy_available = True
except ImportError:
    is_scipy_available = False
import networkx as nx
from networkx import adjacency_matrix
from networkx import number_of_nodes
from networkx.generators.expanders import chordal_cycle_graph
from networkx.generators.expanders import margulis_gabber_galil_graph
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_less
from nose.tools import assert_raises
from nose.tools import assert_true
def test_margulis_gabber_galil_graph():
    try:
        # Scipy is required for conversion to an adjacency matrix.
        # We also use scipy for computing the eigenvalues,
        # but this second use could be done using only numpy.
        import numpy as np
        import scipy.linalg
        has_scipy = True
    except ImportError as e:
        has_scipy = False
    for n in 2, 3, 5, 6, 10:
        g = margulis_gabber_galil_graph(n)
        assert_equal(number_of_nodes(g), n*n)
        for node in g:
            assert_equal(g.degree(node), 8)
            assert_equal(len(node), 2)
            for i in node:
                assert_equal(int(i), i)
                assert_true(0 <= i < n)
        if has_scipy:
            # Eigenvalues are already sorted using the scipy eigvalsh,
            # but the implementation in numpy does not guarantee order.
            w = sorted(scipy.linalg.eigvalsh(adjacency_matrix(g).A))
            assert_less(w[-2], 5*np.sqrt(2))
def test_chordal_cycle_graph():
    """Test for the :func:`networkx.chordal_cycle_graph` function."""
    if not is_scipy_available:
        raise SkipTest('SciPy is not available')
    primes = [3, 5, 7, 11]
    for p in primes:
        G = chordal_cycle_graph(p)
        assert_equal(len(G), p)
        # TODO The second largest eigenvalue should be smaller than a constant,
        # independent of the number of nodes in the graph:
        #
        #     eigs = sorted(scipy.linalg.eigvalsh(adjacency_matrix(G).A))
        #     assert_less(eigs[-2], ...)
        #
def test_margulis_gabber_galil_graph_badinput():
    assert_raises(nx.NetworkXError, margulis_gabber_galil_graph, 3,
                  nx.DiGraph())
    assert_raises(nx.NetworkXError, margulis_gabber_galil_graph, 3,
                  nx.Graph())
 | 
	bsd-3-clause | -8,240,160,307,509,666,000 | 33.680556 | 79 | 0.645575 | false | 
| 
	hjanime/VisTrails | 
	vistrails/gui/module_info.py | 
	1 | 
	13979 | 
	###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
##  - Redistributions of source code must retain the above copyright notice,
##    this list of conditions and the following disclaimer.
##  - Redistributions in binary form must reproduce the above copyright
##    notice, this list of conditions and the following disclaimer in the
##    documentation and/or other materials provided with the distribution.
##  - Neither the name of the New York University nor the names of its
##    contributors may be used to endorse or promote products derived from
##    this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.configuration import get_vistrails_configuration, \
                                         get_vistrails_persistent_configuration
from vistrails.core.system import systemType, vistrails_root_directory
from vistrails.core.utils import versions_increasing
from vistrails.gui.common_widgets import QDockPushButton
from vistrails.gui.module_annotation import QModuleAnnotationTable
from vistrails.gui.ports_pane import PortsList, letterIcon
from vistrails.gui.version_prop import QVersionProp
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
import os
class QModuleInfo(QtGui.QWidget, QVistrailsPaletteInterface):
    def __init__(self, parent=None, flags=QtCore.Qt.Widget):
        QtGui.QWidget.__init__(self, parent, flags)
        self.ports_visible = True
        self.types_visible = True
        self.build_widget()
        self.controller = None
        self.module = None
        self.pipeline_view = None # pipeline_view
        self.read_only = False
        self.is_updating = False
        self.addButtonsToToolbar()
    def addButtonsToToolbar(self):
        # button for toggling port visibility
        eye_open_icon = \
            QtGui.QIcon(os.path.join(vistrails_root_directory(),
                                 'gui/resources/images/eye.png'))
        self.portVisibilityAction = QtGui.QAction(eye_open_icon,
                                        "Show/hide port visibility toggle buttons",
                                        None,
                                        triggered=self.showPortVisibility)
        self.portVisibilityAction.setCheckable(True)
        self.portVisibilityAction.setChecked(True)
        self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
                                               self.portVisibilityAction)
        self.showTypesAction = QtGui.QAction(letterIcon('T'),
                                        "Show/hide type information",
                                        None,
                                        triggered=self.showTypes)
        self.showTypesAction.setCheckable(True)
        self.showTypesAction.setChecked(True)
        self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
                                               self.showTypesAction)
        self.showEditsAction = QtGui.QAction(
                 QtGui.QIcon(os.path.join(vistrails_root_directory(),
                                          'gui/resources/images/pencil.png')),
                 "Show/hide parameter widgets",
                 None,
                 triggered=self.showEdits)
        self.showEditsAction.setCheckable(True)
        self.showEditsAction.setChecked(
            get_vistrails_configuration().check('showInlineParameterWidgets'))
        self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
                                               self.showEditsAction)
    def showPortVisibility(self, checked):
        self.ports_visible = checked
        self.update_module(self.module)
    def showTypes(self, checked):
        self.types_visible = checked
        self.update_module(self.module)
    def showEdits(self, checked):
        get_vistrails_configuration().showInlineParameterWidgets = checked
        get_vistrails_persistent_configuration().showInlineParameterWidgets = checked
        scene = self.controller.current_pipeline_scene
        scene.setupScene(self.controller.current_pipeline)
    def build_widget(self):
        name_label = QtGui.QLabel("Name:")
        self.name_edit = QtGui.QLineEdit()
        self.connect(self.name_edit, QtCore.SIGNAL('editingFinished()'),
                     self.name_editing_finished)
        self.name_edit.setMinimumSize(50, 22)
        type_label = QtGui.QLabel("Type:")
        self.type_edit = QtGui.QLabel("")
        package_label = QtGui.QLabel("Package:")
        self.package_edit = QtGui.QLabel("")
        namespace_label = QtGui.QLabel("Namespace:")
        self.namespace_edit = QtGui.QLabel("")
        id = QtGui.QLabel("Id:")
        self.module_id = QtGui.QLabel("")
        self.configure_button = QDockPushButton("Configure")
        self.connect(self.configure_button, QtCore.SIGNAL('clicked()'),
                     self.configure)
        self.doc_button = QDockPushButton("Documentation")
        self.connect(self.doc_button, QtCore.SIGNAL('clicked()'),
                     self.documentation)
        layout = QtGui.QVBoxLayout()
        layout.setMargin(2)
        layout.setSpacing(4)
        def add_line(left, right):
            h_layout = QtGui.QHBoxLayout()
            h_layout.setMargin(2)
            h_layout.setSpacing(2)
            h_layout.setAlignment(QtCore.Qt.AlignLeft)
            h_layout.addWidget(left)
            h_layout.addWidget(right)
            h_widget = QtGui.QWidget()
            h_widget.setLayout(h_layout)
            h_widget.setSizePolicy(QtGui.QSizePolicy.Ignored,
                                   QtGui.QSizePolicy.Preferred)
            layout.addWidget(h_widget)
        add_line(name_label, self.name_edit)
        add_line(type_label, self.type_edit)
        add_line(package_label, self.package_edit)
        add_line(namespace_label, self.namespace_edit)
        add_line(id, self.module_id)
        h_layout = QtGui.QHBoxLayout()
        h_layout.setMargin(2)
        h_layout.setSpacing(5)
        h_layout.setAlignment(QtCore.Qt.AlignCenter)
        h_layout.addWidget(self.configure_button)
        h_layout.addWidget(self.doc_button)
        layout.addLayout(h_layout)
        
        self.tab_widget = QtGui.QTabWidget()
        # keep from overflowing on mac
        if systemType in ['Darwin']:
            self.tab_widget.tabBar().setStyleSheet('font-size: 12pt')
        # this causes a crash when undocking the palette in Mac OS X
        # see https://bugreports.qt-project.org/browse/QTBUG-16851
        # self.tab_widget.setDocumentMode(True)
        self.input_ports_list = PortsList('input')
        self.tab_widget.addTab(self.input_ports_list, 'Inputs')
        self.output_ports_list = PortsList('output')
        self.tab_widget.addTab(self.output_ports_list, 'Outputs')
        self.ports_lists = [self.input_ports_list,
                            self.output_ports_list]
        self.annotations = QModuleAnnotationTable()
        self.tab_widget.addTab(self.annotations, 'Annotations')
        layout.addWidget(self.tab_widget, 1)
        layout.setAlignment(QtCore.Qt.AlignTop)
        self.setLayout(layout)
        self.setWindowTitle('Module Info')
    def setReadOnly(self, read_only):
        if read_only != self.read_only:
            self.read_only = read_only
            for widget in self.ports_lists + [self.annotations]:
                widget.setReadOnly(read_only)
    def set_controller(self, controller):
        if self.controller == controller:
            return
        self.controller = controller
        for ports_list in self.ports_lists:
            ports_list.set_controller(controller)
        self.annotations.set_controller(controller)
        if self.controller is not None:
            scene = self.controller.current_pipeline_scene
            selected_ids = scene.get_selected_module_ids() 
            modules = [self.controller.current_pipeline.modules[i] 
                       for i in selected_ids]
            if len(modules) == 1:
                self.update_module(modules[0])
            else:
                self.update_module(None)
        else:
            self.update_module()
    def set_visible(self, enabled):
        if enabled and \
           self.module is None and \
           not self.toolWindow().isFloating() and \
           not QVersionProp.instance().toolWindow().isFloating() and \
           not self.toolWindow().visibleRegion().isEmpty():
            QVersionProp.instance().set_visible(True)
        else:
            super(QModuleInfo, self).set_visible(enabled)
    def update_module(self, module=None):
        for plist in self.ports_lists:
            plist.types_visible = self.types_visible
            plist.ports_visible = self.ports_visible
        self.module = module
        for ports_list in self.ports_lists:
            ports_list.update_module(module)
        self.annotations.updateModule(module)
        if module is None:
            # We show the version properties tab if both are tabified and
            # self is visible
            if not self.toolWindow().isFloating() and \
               not QVersionProp.instance().toolWindow().isFloating() and \
               not self.toolWindow().visibleRegion().isEmpty():
                QVersionProp.instance().set_visible(True)
            self.name_edit.setText("")
            if not versions_increasing(QtCore.QT_VERSION_STR, '4.7.0'):
                self.name_edit.setPlaceholderText("")
            # self.name_edit.setEnabled(False)
            self.type_edit.setText("")
            # self.type_edit.setEnabled(False)
            self.package_edit.setText("")
            self.namespace_edit.setText("")
            self.module_id.setText("")
        else:
            # We show self  if both are tabified and
            # the version properties tab is visible
            if not self.toolWindow().isFloating() and \
               not QVersionProp.instance().toolWindow().isFloating() and \
               not QVersionProp.instance().toolWindow().visibleRegion().isEmpty():
                self.set_visible(True)
            if module.has_annotation_with_key('__desc__'):
                label = module.get_annotation_by_key('__desc__').value.strip()
            else:
                label = ''
            self.name_edit.setText(label)
            if not label and not versions_increasing(QtCore.QT_VERSION_STR, 
                                                     '4.7.0'):
                self.name_edit.setPlaceholderText(self.module.name)
            self.type_edit.setText(self.module.name)
            self.package_edit.setText(self.module.package)
            if self.module.namespace is not None:
                self.namespace_edit.setText(self.module.namespace.replace('|',
                                                                          '/'))
            else:
                self.namespace_edit.setText('')
            self.module_id.setText('%d' % self.module.id)
    def name_editing_finished(self):
        # updating module may trigger a second call so we check for that
        if self.is_updating or self.module is None:
            return
        try:
            self.is_updating = True
            old_text = ''
            if self.module.has_annotation_with_key('__desc__'):
                old_text = self.module.get_annotation_by_key('__desc__').value
            new_text = str(self.name_edit.text()).strip()
            if not new_text:
                if old_text:
                    self.controller.delete_annotation('__desc__', 
                                                      self.module.id)
            elif old_text != new_text:
                self.controller.add_annotation(('__desc__', new_text), 
                                               self.module.id)
            scene = self.controller.current_pipeline_scene
            scene.recreate_module(self.controller.current_pipeline, 
                                  self.module.id)
        finally:
            self.is_updating = False
            
    def configure(self):
        from vistrails.gui.vistrails_window import _app
        _app.configure_module()
    def documentation(self):
        from vistrails.gui.vistrails_window import _app
        _app.show_documentation()
        
    def update_entry_klass(self, entry_klass):
        self.input_ports_list.set_entry_klass(entry_klass)
        
    def show_annotations(self):
        if self.module is not None:
            self.tab_widget.setCurrentWidget(self.annotations)
            self.annotations.editNextAvailableCell()
 | 
	bsd-3-clause | -1,784,407,531,136,948,700 | 44.386364 | 85 | 0.601259 | false | 
| 
	xthirtyfive/gamemod | 
	guiprovider.py | 
	1 | 
	2081 | 
	#    Copyright 2013 X35
#
#    This file is part of gamemod.
#
#    gamemod is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    gamemod is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with gamemod.  If not, see <http:#www.gnu.org/licenses/>.
from requestcounter import requestcounter
from debug import debug
# provide the gamemod gui & server list
class guiprovider:
	FILECHECK_INTERVAL = 60*60 # 1h
	
	DBGTAG = "guiprovider"
	DBGTAG_REQUEST = DBGTAG+"/request"
	DBGTAG_REPLY = DBGTAG+"/reply"
	
	LIST_REQUEST = "list"
	READABLELIST_REQUEST = "readablelist"
	def __init__(self, reqfunc):
		self.reqfunc = reqfunc
		self.counter = requestcounter()
		
	def request(self, readable=False):
		return self.reqfunc(readable)
	
	def onrequest(self, line, addr, build): # return (reply, close)
		if line == guiprovider.LIST_REQUEST:
			debug.msg(guiprovider.DBGTAG_REQUEST, "%s request from %s:%d (%sbuild)" % ((line,)+addr+("" if build else "don't ",)))
			self.counter.add(addr[0])
			s = (self.request() if build else True)
			debug.msg(guiprovider.DBGTAG_REQUEST, "sending reply to %s request to %s:%d" % ((line,)+addr))
			return s, True
		elif line == guiprovider.READABLELIST_REQUEST:
			debug.msg(guiprovider.DBGTAG_REQUEST, "%s request from %s:%d (%sbuild)" % ((line,)+addr+("" if build else "don't ",)))
			s = (self.request(True) if build else True)
			debug.msg(guiprovider.DBGTAG_REQUEST, "sending reply to %s request to %s:%d" % ((line,)+addr))
			return s, True
		return None, False
	
	def differentips(self):
		return self.counter.differentips()
	
	def requests(self):
		return self.counter.requests()
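
# Illustrative usage sketch (editor's addition, not part of the original module).
# It shows how a host application might wire guiprovider to a list-building
# callback; the fake callback and address below are assumptions for demo only.
if __name__ == "__main__":
	def _fake_list_builder(readable=False):
		# stand-in for the real server-list builder passed in by gamemod
		return "readable server list" if readable else "server list"

	provider = guiprovider(_fake_list_builder)
	reply, close = provider.onrequest(guiprovider.LIST_REQUEST, ("127.0.0.1", 12345), True)
	print(reply)                 # -> "server list"
	print(close)                 # -> True
	print(provider.requests())   # one request counted so far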
 | 
	gpl-3.0 | 8,101,542,888,332,923,000 | 35.508772 | 121 | 0.701105 | false | 
| 
	romankagan/DDBWorkbench | 
	plugins/hg4idea/testData/bin/hgext/acl.py | 
	91 | 
	10362 | 
	# acl.py - changeset access control for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''hooks for controlling repository access
This hook makes it possible to allow or deny write access to given
branches and paths of a repository when receiving incoming changesets
via pretxnchangegroup and pretxncommit.
The authorization is matched based on the local user name on the
system where the hook runs, and not the committer of the original
changeset (since the latter is merely informative).
The acl hook is best used along with a restricted shell like hgsh,
preventing authenticating users from doing anything other than pushing
or pulling. The hook is not safe to use if users have interactive
shell access, as they can then disable the hook. Nor is it safe if
remote users share an account, because then there is no way to
distinguish them.
The order in which access checks are performed is:
1) Deny  list for branches (section ``acl.deny.branches``)
2) Allow list for branches (section ``acl.allow.branches``)
3) Deny  list for paths    (section ``acl.deny``)
4) Allow list for paths    (section ``acl.allow``)
The allow and deny sections take key-value pairs.
Branch-based Access Control
---------------------------
Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
have branch-based access control. Keys in these sections can be
either:
- a branch name, or
- an asterisk, to match any branch;
The corresponding values can be either:
- a comma-separated list containing users and groups, or
- an asterisk, to match anyone;
You can add the "!" prefix to a user or group name to invert the sense
of the match.
Path-based Access Control
-------------------------
Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
access control. Keys in these sections accept a subtree pattern (with
a glob syntax by default). The corresponding values follow the same
syntax as the other sections above.
Groups
------
Group names must be prefixed with an ``@`` symbol. Specifying a group
name has the same effect as specifying all the users in that group.
You can define group members in the ``acl.groups`` section.
If a group name is not defined there, and Mercurial is running under
a Unix-like system, the list of users will be taken from the OS.
Otherwise, an exception will be raised.
Example Configuration
---------------------
::
  [hooks]
  # Use this if you want to check access restrictions at commit time
  pretxncommit.acl = python:hgext.acl.hook
  # Use this if you want to check access restrictions for pull, push,
  # bundle and serve.
  pretxnchangegroup.acl = python:hgext.acl.hook
  [acl]
  # Allow or deny access for incoming changes only if their source is
  # listed here, let them pass otherwise. Source is "serve" for all
  # remote access (http or ssh), "push", "pull" or "bundle" when the
  # related commands are run locally.
  # Default: serve
  sources = serve
  [acl.deny.branches]
  # Everyone is denied to the frozen branch:
  frozen-branch = *
  # A bad user is denied on all branches:
  * = bad-user
  [acl.allow.branches]
  # A few users are allowed on branch-a:
  branch-a = user-1, user-2, user-3
  # Only one user is allowed on branch-b:
  branch-b = user-1
  # The super user is allowed on any branch:
  * = super-user
  # Everyone is allowed on branch-for-tests:
  branch-for-tests = *
  [acl.deny]
  # This list is checked first. If a match is found, acl.allow is not
  # checked. All users are granted access if acl.deny is not present.
  # Format for both lists: glob pattern = user, ..., @group, ...
  # To match everyone, use an asterisk for the user:
  # my/glob/pattern = *
  # user6 will not have write access to any file:
  ** = user6
  # Group "hg-denied" will not have write access to any file:
  ** = @hg-denied
  # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
  # everyone being able to change all other files. See below.
  src/main/resources/DONT-TOUCH-THIS.txt = *
  [acl.allow]
  # if acl.allow is not present, all users are allowed by default
  # empty acl.allow = no users allowed
  # User "doc_writer" has write access to any file under the "docs"
  # folder:
  docs/** = doc_writer
  # User "jack" and group "designers" have write access to any file
  # under the "images" folder:
  images/** = jack, @designers
  # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
  # will have write access to any file under the "resources" folder
  # (except for 1 file. See acl.deny):
  src/main/resources/** = *
  .hgtags = release_engineer
Examples using the "!" prefix
.............................
Suppose there's a branch that only a given user (or group) should be able to
push to, and you don't want to restrict access to any other branch that may
be created.
The "!" prefix allows you to prevent anyone except a given user or group to
push changesets in a given branch or path.
In the examples below, we will:
1) Deny access to branch "ring" to anyone but user "gollum"
2) Deny access to branch "lake" to anyone but members of the group "hobbit"
3) Deny access to a file to anyone but user "gollum"
::
  [acl.allow.branches]
  # Empty
  [acl.deny.branches]
  # 1) only 'gollum' can commit to branch 'ring';
  # 'gollum' and anyone else can still commit to any other branch.
  ring = !gollum
  # 2) only members of the group 'hobbit' can commit to branch 'lake';
  # 'hobbit' members and anyone else can still commit to any other branch.
  lake = !@hobbit
  # You can also deny access based on file paths:
  [acl.allow]
  # Empty
  [acl.deny]
  # 3) only 'gollum' can change the file below;
  # 'gollum' and anyone else can still change any other file.
  /misty/mountains/cave/ring = !gollum
'''
from mercurial.i18n import _
from mercurial import util, match
import getpass, urllib
testedwith = 'internal'
def _getusers(ui, group):
    # First, try to use group definition from section [acl.groups]
    hgrcusers = ui.configlist('acl.groups', group)
    if hgrcusers:
        return hgrcusers
    ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
    # If no users found in group definition, get users from OS-level group
    try:
        return util.groupmembers(group)
    except KeyError:
        raise util.Abort(_("group '%s' is undefined") % group)
def _usermatch(ui, user, usersorgroups):
    if usersorgroups == '*':
        return True
    for ug in usersorgroups.replace(',', ' ').split():
        if ug.startswith('!'):
            # Test for excluded user or group. Format:
            # if ug is a user  name: !username
            # if ug is a group name: !@groupname
            ug = ug[1:]
            if not ug.startswith('@') and user != ug \
                or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
                return True
        # Test for user or group. Format:
        # if ug is a user  name: username
        # if ug is a group name: @groupname
        elif user == ug \
             or ug.startswith('@') and user in _getusers(ui, ug[1:]):
            return True
    return False
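# Illustrative note (editor's addition, not part of upstream acl.py): for a local
# user "alice" the matching rules above behave roughly as follows (group entries
# such as "@team" / "!@team" are expanded through _getusers):
#
#   _usermatch(ui, "alice", "*")            -> True   (everyone matches)
#   _usermatch(ui, "alice", "alice, bob")   -> True   (listed explicitly)
#   _usermatch(ui, "alice", "!bob")         -> True   (anyone except bob)
#   _usermatch(ui, "alice", "!alice")       -> False  (alice is excluded)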
def buildmatch(ui, repo, user, key):
    '''return tuple of (match function, list enabled).'''
    if not ui.has_section(key):
        ui.debug('acl: %s not enabled\n' % key)
        return None
    pats = [pat for pat, users in ui.configitems(key)
            if _usermatch(ui, user, users)]
    ui.debug('acl: %s enabled, %d entries for user %s\n' %
             (key, len(pats), user))
    # Branch-based ACL
    if not repo:
        if pats:
            # If there's an asterisk (meaning "any branch"), always return True;
            # Otherwise, test if b is in pats
            if '*' in pats:
                return util.always
            return lambda b: b in pats
        return util.never
    # Path-based ACL
    if pats:
        return match.match(repo.root, '', pats)
    return util.never
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
        raise util.Abort(_('config error - hook type "%s" cannot stop '
                           'incoming changesets nor commits') % hooktype)
    if (hooktype == 'pretxnchangegroup' and
        source not in ui.config('acl', 'sources', 'serve').split()):
        ui.debug('acl: changes have source "%s" - skipping\n' % source)
        return
    user = None
    if source == 'serve' and 'url' in kwargs:
        url = kwargs['url'].split(':')
        if url[0] == 'remote' and url[1].startswith('http'):
            user = urllib.unquote(url[3])
    if user is None:
        user = getpass.getuser()
    ui.debug('acl: checking access for user "%s"\n' % user)
    cfg = ui.config('acl', 'config')
    if cfg:
        ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches',
        'acl.deny.branches', 'acl.allow', 'acl.deny'])
    allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
    denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
    allow = buildmatch(ui, repo, user, 'acl.allow')
    deny = buildmatch(ui, repo, user, 'acl.deny')
    for rev in xrange(repo[node], len(repo)):
        ctx = repo[rev]
        branch = ctx.branch()
        if denybranches and denybranches(branch):
            raise util.Abort(_('acl: user "%s" denied on branch "%s"'
                               ' (changeset "%s")')
                               % (user, branch, ctx))
        if allowbranches and not allowbranches(branch):
            raise util.Abort(_('acl: user "%s" not allowed on branch "%s"'
                               ' (changeset "%s")')
                               % (user, branch, ctx))
        ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
        % (ctx, branch))
        for f in ctx.files():
            if deny and deny(f):
                raise util.Abort(_('acl: user "%s" denied on "%s"'
                ' (changeset "%s")') % (user, f, ctx))
            if allow and not allow(f):
                raise util.Abort(_('acl: user "%s" not allowed on "%s"'
                ' (changeset "%s")') % (user, f, ctx))
        ui.debug('acl: path access granted: "%s"\n' % ctx)
 | 
	apache-2.0 | 5,239,691,406,254,036,000 | 31.791139 | 80 | 0.641382 | false | 
| 
	jhu-lcsr-forks/ogre | 
	Tools/Blender2.6Export/ogre_mesh_exporter/mesh_impl.py | 
	16 | 
	16080 | 
	# ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
# ########################################################################
# See mesh_exporter.py for explanation.
# ########################################################################
import bpy, mathutils
from ogre_mesh_exporter.log_manager import LogManager, Message
from operator import attrgetter
# Mesh export settings class to define how we are going to export the mesh.
class MeshExportSettings():
	def __init__(self, fixUpAxisToY = True, requireMaterials = True, applyModifiers = False, skeletonNameFollowMesh = True, runOgreXMLConverter = True):
		self.fixUpAxisToY = fixUpAxisToY
		self.requireMaterials = requireMaterials
		self.applyModifiers = applyModifiers
		self.skeletonNameFollowMesh = skeletonNameFollowMesh
		self.runOgreXMLConverter = runOgreXMLConverter
	@classmethod
	def fromRNA(cls, meshObject):
		globalSettings = bpy.context.scene.ogre_mesh_exporter
		meshSettings = meshObject.data.ogre_mesh_exporter
		return MeshExportSettings(
			fixUpAxisToY = globalSettings.fixUpAxisToY,
			requireMaterials = meshSettings.requireMaterials if (meshSettings.requireMaterials_override) else globalSettings.requireMaterials,
			applyModifiers = meshSettings.applyModifiers if (meshSettings.applyModifiers_override) else globalSettings.applyModifiers,
			skeletonNameFollowMesh = meshSettings.skeletonNameFollowMesh if (meshSettings.skeletonNameFollowMesh_override) else globalSettings.skeletonNameFollowMesh,
			runOgreXMLConverter = globalSettings.runOgreXMLConverter)
class BoneWeight():
	def __init__(self, boneIndex, boneWeight):
		self.mBoneIndex = boneIndex
		self.mBoneWeight = boneWeight
class Vertex():
	def __init__(self, pos, norm, uvs = list(), colors = list(), boneWeights = list()):
		self.mPosition = pos
		self.mNormal = norm
		self.mUVs = uvs
		self.mColors = colors
		self.mBoneWeights = boneWeights
	def match(self, norm, uvs, colors):
		# Test normal.
		if (self.mNormal != norm): return False;
		# Test UVs.
		if (len(self.mUVs) != len(uvs)): return False
		for uv1, uv2 in zip(self.mUVs, uvs):
			if (uv1 != uv2): return False
		# Test Colors.
		if (len(self.mColors) != len(colors)): return False
		for color1, color2 in zip(self.mColors, colors):
			if (color1 != color2): return False
		return True
class VertexBuffer():
	def __init__(self, uvLayers = 0, colorLayers = 0, hasBoneWeights = False):
		# Vertex data.
		self.mVertexData = list()
		self.mUVLayers = uvLayers
		self.mColorLayers = colorLayers
		self.mHasBoneWeights = hasBoneWeights
		# Blender mesh -> vertex index link.
		# Only useful when exporting.
		self.mMeshVertexIndexLink = dict()
	def reset(self, uvLayers, colorLayers, hasBoneWeights = False):
		self.mVertexData = list()
		self.mUVLayers = uvLayers
		self.mColorLayers = colorLayers
		self.mHasBoneWeights = hasBoneWeights
	def vertexCount(self):
		return len(self.mVertexData)
	# This method adds a vertex from the given blend mesh index into the buffer.
	# If the uv information does not match the recorded vertex, it will automatically
	# clone a new vertex for use.
	def addVertex(self, index, pos, norm, uvs, colors, boneWeights = list(), fixUpAxisToY = True):
		# Fix Up axis to Y (swap Y and Z and negate Z)
		if (fixUpAxisToY):
			pos = [pos[0], pos[2], -pos[1]]
			norm = [norm[0], norm[2], -norm[1]]
		# make sure uv layers and color layers matches as defined.
		if (len(uvs) != self.mUVLayers or len(colors) != self.mColorLayers):
			raise Exception("Invalid UV layer or Color layer count! Expecting uv(%d), color(%d). Got uv(%d), color(%d)" %
				(self.mUVLayers, self.mColorLayers, len(uvs), len(colors)))
		# try to find pre added vertex that matches criteria.
		if (index in self.mMeshVertexIndexLink):
			localIndexList = self.mMeshVertexIndexLink[index]
			for localIndex in localIndexList:
				if (self.mVertexData[localIndex].match(norm, uvs, colors)):
					return localIndex
		# nothing found. so we add a new vertex.
		localIndex = len(self.mVertexData)
		if (index not in self.mMeshVertexIndexLink): self.mMeshVertexIndexLink[index] = list()
		self.mMeshVertexIndexLink[index].append(localIndex)
		self.mVertexData.append(Vertex(pos, norm, uvs, colors, boneWeights))
		return localIndex
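	# Illustrative sketch (editor's addition, not part of the original exporter):
	# repeated calls with the same blend-vertex index and identical normal/uv/color
	# data reuse one local vertex, while differing UVs force a clone. Values are
	# made up for demonstration:
	#
	#   vb = VertexBuffer(uvLayers = 1)
	#   vb.addVertex(7, (0, 0, 0), (0, 0, 1), [(0.0, 0.0)], [])   # new vertex  -> 0
	#   vb.addVertex(7, (0, 0, 0), (0, 0, 1), [(0.0, 0.0)], [])   # exact match -> 0
	#   vb.addVertex(7, (0, 0, 0), (0, 0, 1), [(0.5, 0.5)], [])   # UV differs  -> 1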
	def serialize(self, file, indent = ''):
		extraAttributes = ''
		uvLayerCount = 8 if (self.mUVLayers > 8) else self.mUVLayers
		if (uvLayerCount > 0):
			extraAttributes = ' texture_coords="%d"' % uvLayerCount
			for i in range(uvLayerCount):
				extraAttributes += ' texture_coord_dimensions_%d="float2"' % i
		colorLayerCount = self.mColorLayers
		if (colorLayerCount > 0): extraAttributes += ' colours_diffuse="true"'
		if (colorLayerCount > 1): extraAttributes += ' colours_specular="true"'
		file.write('%s<vertexbuffer positions="true" normals="true"%s>\n' % (indent, extraAttributes))
		for vertex in self.mVertexData:
			file.write('%s\t<vertex>\n' % indent)
			# write position and normal.
			file.write('%s\t\t<position x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mPosition[0], vertex.mPosition[1], vertex.mPosition[2]))
			file.write('%s\t\t<normal x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mNormal[0], vertex.mNormal[1], vertex.mNormal[2]))
			# write UV layers. (NOTE: Blender uses bottom left coord! Ogre uses top left! So we have to flip Y.)
			for i in range(uvLayerCount):
				uv = vertex.mUVs[i]
				file.write('%s\t\t<texcoord u="%.6f" v="%.6f" />\n' % (indent, uv[0], (1.0 - uv[1])))
			# write diffuse.
			if (colorLayerCount > 0):
				color = vertex.mColors[0]
				file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
			# write specular.
			if (colorLayerCount > 1):
				color = vertex.mColors[1]
				file.write('%s\t\t<colour_specular value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
			file.write('%s\t</vertex>\n' % indent)
		file.write('%s</vertexbuffer>\n' % indent)
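	# For reference (editor's sketch, values illustrative): a buffer with one UV
	# layer and a single vertex serializes roughly as
	#
	#   <vertexbuffer positions="true" normals="true" texture_coords="1" texture_coord_dimensions_0="float2">
	#       <vertex>
	#           <position x="0.000000" y="0.000000" z="0.000000" />
	#           <normal x="0.000000" y="1.000000" z="0.000000" />
	#           <texcoord u="0.000000" v="1.000000" />
	#       </vertex>
	#   </vertexbuffer>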
	def serializeBoneAssignments(self, file, indent = ''):
		file.write('%s\t<boneassignments>\n' % indent)
		vertexWithNoBoneAssignements = 0;
		for i, vertex in enumerate(self.mVertexData):
			if (len(vertex.mBoneWeights) == 0): vertexWithNoBoneAssignements += 1
			for boneWeight in vertex.mBoneWeights:
				file.write('%s\t\t<vertexboneassignment vertexindex="%d" boneindex="%d" weight="%.6f" />\n' %
					(indent, i, boneWeight.mBoneIndex, boneWeight.mBoneWeight))
		if (vertexWithNoBoneAssignements > 0):
			LogManager.logMessage("There are %d vertices with no bone assignments!" % vertexWithNoBoneAssignements, Message.LVL_WARNING)
		file.write('%s\t</boneassignments>\n' % indent)
class SubMesh():
	def __init__(self, vertexBuffer = None, meshVertexIndexLink = None, name = None):
		# True if submesh is sharing vertex buffer.
		self.mShareVertexBuffer = False
		# Vertex buffer.
		self.mVertexBuffer = vertexBuffer if (vertexBuffer) else VertexBuffer()
		# Blender mesh -> local/shared vertex index link.
		self.mMeshVertexIndexLink = meshVertexIndexLink if (meshVertexIndexLink) else dict()
		# Face data.
		self.mFaceData = list()
		# Blender material.
		self.mMaterial = None
		# Name of submesh
		self.mName = name
		if ((vertexBuffer is not None) and (meshVertexIndexLink is not None)):
			self.mShareVertexBuffer = True
	def insertPolygon(self, blendMesh, polygon, blendVertexGroups = None, ogreSkeleton = None, fixUpAxisToY = True):
		polygonVertices = polygon.vertices
		polygonVertexCount = polygon.loop_total
		# extract uv information.
		# Here we convert blender uv data into our own
		# uv information that lists uvs by vertices.
		blendUVLoopLayers = blendMesh.uv_layers
		# construct empty polygon vertex uv list.
		polygonVertUVs = list()
		for i in range(polygonVertexCount): polygonVertUVs.append(list())
		for uvLoopLayer in blendUVLoopLayers:
			for i, loopIndex in enumerate(polygon.loop_indices):
				polygonVertUVs[i].append(uvLoopLayer.data[loopIndex].uv)
		# extract color information.
		# Here we convert blender color data into our own
		# color information that lists colors by vertices.
		blendColorLoopLayers = blendMesh.vertex_colors
		# construct empty polygon vertex color list.
		polygonVertColors = list()
		for i in range(polygonVertexCount): polygonVertColors.append(list())
		for colorLoopLayer in blendColorLoopLayers:
			for i, loopIndex in enumerate(polygon.loop_indices):
				polygonVertColors[i].append(colorLoopLayer.data[loopIndex].color)
		# loop through the vertices and add to this submesh.
		localIndices = list()
		useSmooth = polygon.use_smooth
		for index, uvs, colors in zip(polygonVertices, polygonVertUVs, polygonVertColors):
			vertex = blendMesh.vertices[index]
			norm = vertex.normal if (useSmooth) else polygon.normal
			# grab bone weights.
			boneWeights = list()
			if (ogreSkeleton is not None):
				for groupElement in vertex.groups:
					groupName = blendVertexGroups[groupElement.group].name
					boneIndex = ogreSkeleton.getBoneIndex(groupName)
					if (boneIndex == -1 or abs(groupElement.weight) < 0.000001): continue
					boneWeight = groupElement.weight
					boneWeights.append(BoneWeight(boneIndex, boneWeight))
			# trim bone weight count if too many defined.
			if (len(boneWeights) > 4):
				LogManager.logMessage("More than 4 bone weights are defined for a vertex! Best 4 will be used.", Message.LVL_WARNING)
				boneWeights.sort(key=attrgetter('mBoneWeight'), reverse=True)
				while (len(boneWeights) > 4): del boneWeights[-1]
			localIndices.append(self.mVertexBuffer.addVertex(index, vertex.co, norm, uvs, colors, boneWeights, fixUpAxisToY))
		# construct triangle index data.
		if (polygonVertexCount == 3):
			self.mFaceData.append(localIndices)
		else:
			# split quad into triangles.
			self.mFaceData.append(localIndices[:3])
			self.mFaceData.append([localIndices[0], localIndices[2], localIndices[3]])
	def serialize(self, file):
		vertexCount = self.mVertexBuffer.vertexCount()
		materialAttribute = '' if (self.mMaterial is None) else ' material="%s"' % self.mMaterial.name
		file.write('\t\t<submesh%s usesharedvertices="%s" use32bitindexes="%s">\n' %
			(materialAttribute, 'true' if self.mShareVertexBuffer else 'false',
			'true' if (vertexCount > 65536) else 'false'))
		# write face data.
		file.write('\t\t\t<faces count="%d">\n' % len(self.mFaceData))
		for face in self.mFaceData:
			file.write('\t\t\t\t<face v1="%d" v2="%d" v3="%d" />\n' % tuple(face))
		file.write('\t\t\t</faces>\n')
		# write submesh vertex buffer if not shared.
		if (not self.mShareVertexBuffer):
			file.write('\t\t\t<geometry vertexcount="%d">\n' % vertexCount)
			self.mVertexBuffer.serialize(file, '\t\t\t\t')
			file.write('\t\t\t</geometry>\n')
			# write bone assignments
			if (self.mVertexBuffer.mHasBoneWeights):
				self.mVertexBuffer.serializeBoneAssignments(file, '\t\t\t')
		file.write('\t\t</submesh>\n')
class Mesh():
	def __init__(self, blendMesh = None, blendVertexGroups = None, ogreSkeleton = None, exportSettings = MeshExportSettings()):
		# shared vertex buffer.
		self.mSharedVertexBuffer = VertexBuffer()
		# Blender mesh -> shared vertex index link.
		self.mSharedMeshVertexIndexLink = dict()
		# collection of submeshes.
		self.mSubMeshDict = dict()
		# skip blend mesh conversion if no blend mesh passed in.
		if (blendMesh is None): return
		self.mOgreSkeleton = ogreSkeleton
		hasBoneWeights = ogreSkeleton is not None
		# Lets do some pre checking to show warnings if needed.
		uvLayerCount = len(blendMesh.uv_layers)
		colorLayerCount = len(blendMesh.vertex_colors)
		if (uvLayerCount > 8): LogManager.logMessage("More than 8 UV layers in this mesh. Only 8 will be exported.", Message.LVL_WARNING)
		if (colorLayerCount > 2): LogManager.logMessage("More than 2 color layers in this mesh. Only 2 will be exported.", Message.LVL_WARNING)
		# setup shared vertex buffer.
		self.mSharedVertexBuffer.reset(uvLayerCount, colorLayerCount, hasBoneWeights)
		# split up the mesh into submeshes by materials.
		# we first get sub mesh shared vertices option.
		materialList = blendMesh.materials
		materialCount = len(materialList)
		subMeshProperties = blendMesh.ogre_mesh_exporter.subMeshProperties
		while (len(subMeshProperties) < materialCount): subMeshProperties.add() # add more items if needed.
		while (len(subMeshProperties) > materialCount): subMeshProperties.remove(0) # remove items if needed.
		LogManager.logMessage("Material Count: %d" % len(materialList), Message.LVL_INFO)
		for polygon in blendMesh.polygons:
			# get or create submesh.
			if (polygon.material_index in self.mSubMeshDict):
				subMesh = self.mSubMeshDict[polygon.material_index]
			else:
				# instantiate submesh based on whether sharing vertices or not.
				subMeshProperty = subMeshProperties[polygon.material_index]
				if (subMeshProperty.useSharedVertices):
					subMesh = SubMesh(self.mSharedVertexBuffer, self.mSharedMeshVertexIndexLink, subMeshProperty.name)
				else:
					subMesh = SubMesh(VertexBuffer(uvLayerCount, colorLayerCount, hasBoneWeights), name = subMeshProperty.name)
				subMesh.mMaterial = None if (len(materialList) == 0) else materialList[polygon.material_index]
				if (exportSettings.requireMaterials and subMesh.mMaterial == None):
					LogManager.logMessage("Some faces are not assigned with a material!", Message.LVL_WARNING)
					LogManager.logMessage("To hide this warning, please uncheck the 'Require Materials' option.", Message.LVL_WARNING)
				self.mSubMeshDict[polygon.material_index] = subMesh
			# insert polygon.
			subMesh.insertPolygon(blendMesh, polygon, blendVertexGroups, ogreSkeleton, exportSettings.fixUpAxisToY)
	def serialize(self, file):
		file.write('<mesh>\n')
		# write shared vertex buffer if available.
		sharedVertexCount = self.mSharedVertexBuffer.vertexCount()
		if (sharedVertexCount > 0):
			file.write('\t<sharedgeometry vertexcount="%d">\n' % sharedVertexCount)
			self.mSharedVertexBuffer.serialize(file, '\t\t')
			file.write('\t</sharedgeometry>\n')
			# write bone assignments
			if (self.mSharedVertexBuffer.mHasBoneWeights):
				self.mSharedVertexBuffer.serializeBoneAssignments(file, '\t\t')
		subMeshNames = list()
		# write submeshes.
		file.write('\t<submeshes>\n')
		for subMesh in self.mSubMeshDict.values():
			name = subMesh.mName
			if (name):
				if (not name in subMeshNames):
					subMeshNames.append(name)
				else:
					LogManager.logMessage("Multiple submeshes with the same name defined: %s" % name, Message.LVL_WARNING)
			subMesh.serialize(file)
		file.write('\t</submeshes>\n')
		# write submesh names
		if (len(subMeshNames)):
			file.write('\t<submeshnames>\n')
			for index, name in enumerate(subMeshNames):
				file.write('\t\t<submeshname name="%s" index="%d" />\n' % (name, index))
			file.write('\t</submeshnames>\n')
		# write skeleton link
		if (self.mOgreSkeleton is not None):
			file.write('\t<skeletonlink name="%s.skeleton" />\n' % self.mOgreSkeleton.mName)
		file.write('</mesh>\n')
 | 
	mit | 1,976,643,602,277,208,600 | 41.204724 | 157 | 0.721891 | false | 
| 
	Imaginashion/cloud-vision | 
	.fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/encodings/cp1255.py | 
	272 | 
	12466 | 
	""" Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return codecs.CodecInfo(
        name='cp1255',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
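### Usage example
# Editor's sketch (not part of the gencodec.py output): once the encodings
# package imports this module and calls getregentry(), the codec is reachable
# through the normal str/bytes APIs, e.g.:
#
#     'shalom'.encode('cp1255')   # ASCII range maps to itself -> b'shalom'
#     b'\xe9'.decode('cp1255')    # yields a Hebrew letter from decoding_table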
### Decoding Table
decoding_table = (
    '\x00'     #  0x00 -> NULL
    '\x01'     #  0x01 -> START OF HEADING
    '\x02'     #  0x02 -> START OF TEXT
    '\x03'     #  0x03 -> END OF TEXT
    '\x04'     #  0x04 -> END OF TRANSMISSION
    '\x05'     #  0x05 -> ENQUIRY
    '\x06'     #  0x06 -> ACKNOWLEDGE
    '\x07'     #  0x07 -> BELL
    '\x08'     #  0x08 -> BACKSPACE
    '\t'       #  0x09 -> HORIZONTAL TABULATION
    '\n'       #  0x0A -> LINE FEED
    '\x0b'     #  0x0B -> VERTICAL TABULATION
    '\x0c'     #  0x0C -> FORM FEED
    '\r'       #  0x0D -> CARRIAGE RETURN
    '\x0e'     #  0x0E -> SHIFT OUT
    '\x0f'     #  0x0F -> SHIFT IN
    '\x10'     #  0x10 -> DATA LINK ESCAPE
    '\x11'     #  0x11 -> DEVICE CONTROL ONE
    '\x12'     #  0x12 -> DEVICE CONTROL TWO
    '\x13'     #  0x13 -> DEVICE CONTROL THREE
    '\x14'     #  0x14 -> DEVICE CONTROL FOUR
    '\x15'     #  0x15 -> NEGATIVE ACKNOWLEDGE
    '\x16'     #  0x16 -> SYNCHRONOUS IDLE
    '\x17'     #  0x17 -> END OF TRANSMISSION BLOCK
    '\x18'     #  0x18 -> CANCEL
    '\x19'     #  0x19 -> END OF MEDIUM
    '\x1a'     #  0x1A -> SUBSTITUTE
    '\x1b'     #  0x1B -> ESCAPE
    '\x1c'     #  0x1C -> FILE SEPARATOR
    '\x1d'     #  0x1D -> GROUP SEPARATOR
    '\x1e'     #  0x1E -> RECORD SEPARATOR
    '\x1f'     #  0x1F -> UNIT SEPARATOR
    ' '        #  0x20 -> SPACE
    '!'        #  0x21 -> EXCLAMATION MARK
    '"'        #  0x22 -> QUOTATION MARK
    '#'        #  0x23 -> NUMBER SIGN
    '$'        #  0x24 -> DOLLAR SIGN
    '%'        #  0x25 -> PERCENT SIGN
    '&'        #  0x26 -> AMPERSAND
    "'"        #  0x27 -> APOSTROPHE
    '('        #  0x28 -> LEFT PARENTHESIS
    ')'        #  0x29 -> RIGHT PARENTHESIS
    '*'        #  0x2A -> ASTERISK
    '+'        #  0x2B -> PLUS SIGN
    ','        #  0x2C -> COMMA
    '-'        #  0x2D -> HYPHEN-MINUS
    '.'        #  0x2E -> FULL STOP
    '/'        #  0x2F -> SOLIDUS
    '0'        #  0x30 -> DIGIT ZERO
    '1'        #  0x31 -> DIGIT ONE
    '2'        #  0x32 -> DIGIT TWO
    '3'        #  0x33 -> DIGIT THREE
    '4'        #  0x34 -> DIGIT FOUR
    '5'        #  0x35 -> DIGIT FIVE
    '6'        #  0x36 -> DIGIT SIX
    '7'        #  0x37 -> DIGIT SEVEN
    '8'        #  0x38 -> DIGIT EIGHT
    '9'        #  0x39 -> DIGIT NINE
    ':'        #  0x3A -> COLON
    ';'        #  0x3B -> SEMICOLON
    '<'        #  0x3C -> LESS-THAN SIGN
    '='        #  0x3D -> EQUALS SIGN
    '>'        #  0x3E -> GREATER-THAN SIGN
    '?'        #  0x3F -> QUESTION MARK
    '@'        #  0x40 -> COMMERCIAL AT
    'A'        #  0x41 -> LATIN CAPITAL LETTER A
    'B'        #  0x42 -> LATIN CAPITAL LETTER B
    'C'        #  0x43 -> LATIN CAPITAL LETTER C
    'D'        #  0x44 -> LATIN CAPITAL LETTER D
    'E'        #  0x45 -> LATIN CAPITAL LETTER E
    'F'        #  0x46 -> LATIN CAPITAL LETTER F
    'G'        #  0x47 -> LATIN CAPITAL LETTER G
    'H'        #  0x48 -> LATIN CAPITAL LETTER H
    'I'        #  0x49 -> LATIN CAPITAL LETTER I
    'J'        #  0x4A -> LATIN CAPITAL LETTER J
    'K'        #  0x4B -> LATIN CAPITAL LETTER K
    'L'        #  0x4C -> LATIN CAPITAL LETTER L
    'M'        #  0x4D -> LATIN CAPITAL LETTER M
    'N'        #  0x4E -> LATIN CAPITAL LETTER N
    'O'        #  0x4F -> LATIN CAPITAL LETTER O
    'P'        #  0x50 -> LATIN CAPITAL LETTER P
    'Q'        #  0x51 -> LATIN CAPITAL LETTER Q
    'R'        #  0x52 -> LATIN CAPITAL LETTER R
    'S'        #  0x53 -> LATIN CAPITAL LETTER S
    'T'        #  0x54 -> LATIN CAPITAL LETTER T
    'U'        #  0x55 -> LATIN CAPITAL LETTER U
    'V'        #  0x56 -> LATIN CAPITAL LETTER V
    'W'        #  0x57 -> LATIN CAPITAL LETTER W
    'X'        #  0x58 -> LATIN CAPITAL LETTER X
    'Y'        #  0x59 -> LATIN CAPITAL LETTER Y
    'Z'        #  0x5A -> LATIN CAPITAL LETTER Z
    '['        #  0x5B -> LEFT SQUARE BRACKET
    '\\'       #  0x5C -> REVERSE SOLIDUS
    ']'        #  0x5D -> RIGHT SQUARE BRACKET
    '^'        #  0x5E -> CIRCUMFLEX ACCENT
    '_'        #  0x5F -> LOW LINE
    '`'        #  0x60 -> GRAVE ACCENT
    'a'        #  0x61 -> LATIN SMALL LETTER A
    'b'        #  0x62 -> LATIN SMALL LETTER B
    'c'        #  0x63 -> LATIN SMALL LETTER C
    'd'        #  0x64 -> LATIN SMALL LETTER D
    'e'        #  0x65 -> LATIN SMALL LETTER E
    'f'        #  0x66 -> LATIN SMALL LETTER F
    'g'        #  0x67 -> LATIN SMALL LETTER G
    'h'        #  0x68 -> LATIN SMALL LETTER H
    'i'        #  0x69 -> LATIN SMALL LETTER I
    'j'        #  0x6A -> LATIN SMALL LETTER J
    'k'        #  0x6B -> LATIN SMALL LETTER K
    'l'        #  0x6C -> LATIN SMALL LETTER L
    'm'        #  0x6D -> LATIN SMALL LETTER M
    'n'        #  0x6E -> LATIN SMALL LETTER N
    'o'        #  0x6F -> LATIN SMALL LETTER O
    'p'        #  0x70 -> LATIN SMALL LETTER P
    'q'        #  0x71 -> LATIN SMALL LETTER Q
    'r'        #  0x72 -> LATIN SMALL LETTER R
    's'        #  0x73 -> LATIN SMALL LETTER S
    't'        #  0x74 -> LATIN SMALL LETTER T
    'u'        #  0x75 -> LATIN SMALL LETTER U
    'v'        #  0x76 -> LATIN SMALL LETTER V
    'w'        #  0x77 -> LATIN SMALL LETTER W
    'x'        #  0x78 -> LATIN SMALL LETTER X
    'y'        #  0x79 -> LATIN SMALL LETTER Y
    'z'        #  0x7A -> LATIN SMALL LETTER Z
    '{'        #  0x7B -> LEFT CURLY BRACKET
    '|'        #  0x7C -> VERTICAL LINE
    '}'        #  0x7D -> RIGHT CURLY BRACKET
    '~'        #  0x7E -> TILDE
    '\x7f'     #  0x7F -> DELETE
    '\u20ac'   #  0x80 -> EURO SIGN
    '\ufffe'   #  0x81 -> UNDEFINED
    '\u201a'   #  0x82 -> SINGLE LOW-9 QUOTATION MARK
    '\u0192'   #  0x83 -> LATIN SMALL LETTER F WITH HOOK
    '\u201e'   #  0x84 -> DOUBLE LOW-9 QUOTATION MARK
    '\u2026'   #  0x85 -> HORIZONTAL ELLIPSIS
    '\u2020'   #  0x86 -> DAGGER
    '\u2021'   #  0x87 -> DOUBLE DAGGER
    '\u02c6'   #  0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
    '\u2030'   #  0x89 -> PER MILLE SIGN
    '\ufffe'   #  0x8A -> UNDEFINED
    '\u2039'   #  0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    '\ufffe'   #  0x8C -> UNDEFINED
    '\ufffe'   #  0x8D -> UNDEFINED
    '\ufffe'   #  0x8E -> UNDEFINED
    '\ufffe'   #  0x8F -> UNDEFINED
    '\ufffe'   #  0x90 -> UNDEFINED
    '\u2018'   #  0x91 -> LEFT SINGLE QUOTATION MARK
    '\u2019'   #  0x92 -> RIGHT SINGLE QUOTATION MARK
    '\u201c'   #  0x93 -> LEFT DOUBLE QUOTATION MARK
    '\u201d'   #  0x94 -> RIGHT DOUBLE QUOTATION MARK
    '\u2022'   #  0x95 -> BULLET
    '\u2013'   #  0x96 -> EN DASH
    '\u2014'   #  0x97 -> EM DASH
    '\u02dc'   #  0x98 -> SMALL TILDE
    '\u2122'   #  0x99 -> TRADE MARK SIGN
    '\ufffe'   #  0x9A -> UNDEFINED
    '\u203a'   #  0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    '\ufffe'   #  0x9C -> UNDEFINED
    '\ufffe'   #  0x9D -> UNDEFINED
    '\ufffe'   #  0x9E -> UNDEFINED
    '\ufffe'   #  0x9F -> UNDEFINED
    '\xa0'     #  0xA0 -> NO-BREAK SPACE
    '\xa1'     #  0xA1 -> INVERTED EXCLAMATION MARK
    '\xa2'     #  0xA2 -> CENT SIGN
    '\xa3'     #  0xA3 -> POUND SIGN
    '\u20aa'   #  0xA4 -> NEW SHEQEL SIGN
    '\xa5'     #  0xA5 -> YEN SIGN
    '\xa6'     #  0xA6 -> BROKEN BAR
    '\xa7'     #  0xA7 -> SECTION SIGN
    '\xa8'     #  0xA8 -> DIAERESIS
    '\xa9'     #  0xA9 -> COPYRIGHT SIGN
    '\xd7'     #  0xAA -> MULTIPLICATION SIGN
    '\xab'     #  0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xac'     #  0xAC -> NOT SIGN
    '\xad'     #  0xAD -> SOFT HYPHEN
    '\xae'     #  0xAE -> REGISTERED SIGN
    '\xaf'     #  0xAF -> MACRON
    '\xb0'     #  0xB0 -> DEGREE SIGN
    '\xb1'     #  0xB1 -> PLUS-MINUS SIGN
    '\xb2'     #  0xB2 -> SUPERSCRIPT TWO
    '\xb3'     #  0xB3 -> SUPERSCRIPT THREE
    '\xb4'     #  0xB4 -> ACUTE ACCENT
    '\xb5'     #  0xB5 -> MICRO SIGN
    '\xb6'     #  0xB6 -> PILCROW SIGN
    '\xb7'     #  0xB7 -> MIDDLE DOT
    '\xb8'     #  0xB8 -> CEDILLA
    '\xb9'     #  0xB9 -> SUPERSCRIPT ONE
    '\xf7'     #  0xBA -> DIVISION SIGN
    '\xbb'     #  0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbc'     #  0xBC -> VULGAR FRACTION ONE QUARTER
    '\xbd'     #  0xBD -> VULGAR FRACTION ONE HALF
    '\xbe'     #  0xBE -> VULGAR FRACTION THREE QUARTERS
    '\xbf'     #  0xBF -> INVERTED QUESTION MARK
    '\u05b0'   #  0xC0 -> HEBREW POINT SHEVA
    '\u05b1'   #  0xC1 -> HEBREW POINT HATAF SEGOL
    '\u05b2'   #  0xC2 -> HEBREW POINT HATAF PATAH
    '\u05b3'   #  0xC3 -> HEBREW POINT HATAF QAMATS
    '\u05b4'   #  0xC4 -> HEBREW POINT HIRIQ
    '\u05b5'   #  0xC5 -> HEBREW POINT TSERE
    '\u05b6'   #  0xC6 -> HEBREW POINT SEGOL
    '\u05b7'   #  0xC7 -> HEBREW POINT PATAH
    '\u05b8'   #  0xC8 -> HEBREW POINT QAMATS
    '\u05b9'   #  0xC9 -> HEBREW POINT HOLAM
    '\ufffe'   #  0xCA -> UNDEFINED
    '\u05bb'   #  0xCB -> HEBREW POINT QUBUTS
    '\u05bc'   #  0xCC -> HEBREW POINT DAGESH OR MAPIQ
    '\u05bd'   #  0xCD -> HEBREW POINT METEG
    '\u05be'   #  0xCE -> HEBREW PUNCTUATION MAQAF
    '\u05bf'   #  0xCF -> HEBREW POINT RAFE
    '\u05c0'   #  0xD0 -> HEBREW PUNCTUATION PASEQ
    '\u05c1'   #  0xD1 -> HEBREW POINT SHIN DOT
    '\u05c2'   #  0xD2 -> HEBREW POINT SIN DOT
    '\u05c3'   #  0xD3 -> HEBREW PUNCTUATION SOF PASUQ
    '\u05f0'   #  0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
    '\u05f1'   #  0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
    '\u05f2'   #  0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
    '\u05f3'   #  0xD7 -> HEBREW PUNCTUATION GERESH
    '\u05f4'   #  0xD8 -> HEBREW PUNCTUATION GERSHAYIM
    '\ufffe'   #  0xD9 -> UNDEFINED
    '\ufffe'   #  0xDA -> UNDEFINED
    '\ufffe'   #  0xDB -> UNDEFINED
    '\ufffe'   #  0xDC -> UNDEFINED
    '\ufffe'   #  0xDD -> UNDEFINED
    '\ufffe'   #  0xDE -> UNDEFINED
    '\ufffe'   #  0xDF -> UNDEFINED
    '\u05d0'   #  0xE0 -> HEBREW LETTER ALEF
    '\u05d1'   #  0xE1 -> HEBREW LETTER BET
    '\u05d2'   #  0xE2 -> HEBREW LETTER GIMEL
    '\u05d3'   #  0xE3 -> HEBREW LETTER DALET
    '\u05d4'   #  0xE4 -> HEBREW LETTER HE
    '\u05d5'   #  0xE5 -> HEBREW LETTER VAV
    '\u05d6'   #  0xE6 -> HEBREW LETTER ZAYIN
    '\u05d7'   #  0xE7 -> HEBREW LETTER HET
    '\u05d8'   #  0xE8 -> HEBREW LETTER TET
    '\u05d9'   #  0xE9 -> HEBREW LETTER YOD
    '\u05da'   #  0xEA -> HEBREW LETTER FINAL KAF
    '\u05db'   #  0xEB -> HEBREW LETTER KAF
    '\u05dc'   #  0xEC -> HEBREW LETTER LAMED
    '\u05dd'   #  0xED -> HEBREW LETTER FINAL MEM
    '\u05de'   #  0xEE -> HEBREW LETTER MEM
    '\u05df'   #  0xEF -> HEBREW LETTER FINAL NUN
    '\u05e0'   #  0xF0 -> HEBREW LETTER NUN
    '\u05e1'   #  0xF1 -> HEBREW LETTER SAMEKH
    '\u05e2'   #  0xF2 -> HEBREW LETTER AYIN
    '\u05e3'   #  0xF3 -> HEBREW LETTER FINAL PE
    '\u05e4'   #  0xF4 -> HEBREW LETTER PE
    '\u05e5'   #  0xF5 -> HEBREW LETTER FINAL TSADI
    '\u05e6'   #  0xF6 -> HEBREW LETTER TSADI
    '\u05e7'   #  0xF7 -> HEBREW LETTER QOF
    '\u05e8'   #  0xF8 -> HEBREW LETTER RESH
    '\u05e9'   #  0xF9 -> HEBREW LETTER SHIN
    '\u05ea'   #  0xFA -> HEBREW LETTER TAV
    '\ufffe'   #  0xFB -> UNDEFINED
    '\ufffe'   #  0xFC -> UNDEFINED
    '\u200e'   #  0xFD -> LEFT-TO-RIGHT MARK
    '\u200f'   #  0xFE -> RIGHT-TO-LEFT MARK
    '\ufffe'   #  0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
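### Usage sketch
# Hedged usage sketch: round-trips a short Hebrew sample through the charmap
# tables defined above. The sample string is arbitrary and chosen purely for
# illustration.
if __name__ == '__main__':
    sample = '\u05e9\u05dc\u05d5\u05dd'  # shin, lamed, vav, final mem
    encoded = codecs.charmap_encode(sample, 'strict', encoding_table)[0]
    decoded = codecs.charmap_decode(encoded, 'strict', decoding_table)[0]
    assert decoded == sample, 'cp1255 round trip failed'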
 | 
	mit | 5,207,704,298,974,207,000 | 39.605863 | 119 | 0.520777 | false | 
| 
	levibostian/myBlanky | 
	googleAppEngine/google/appengine/api/search/__init__.py | 
	8 | 
	2549 | 
	#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Search API module."""
from search import AtomField
from search import Cursor
from search import DateField
from search import DeleteError
from search import DeleteResult
from search import Document
from search import DOCUMENT_ID_FIELD_NAME
from search import Error
from search import ExpressionError
from search import Field
from search import FieldExpression
from search import GeoField
from search import GeoPoint
from search import get_indexes
from search import GetResponse
from search import HtmlField
from search import Index
from search import InternalError
from search import InvalidRequest
from search import LANGUAGE_FIELD_NAME
from search import MatchScorer
from search import MAXIMUM_DOCUMENT_ID_LENGTH
from search import MAXIMUM_DOCUMENTS_PER_PUT_REQUEST
from search import MAXIMUM_DOCUMENTS_RETURNED_PER_SEARCH
from search import MAXIMUM_EXPRESSION_LENGTH
from search import MAXIMUM_FIELD_ATOM_LENGTH
from search import MAXIMUM_FIELD_NAME_LENGTH
from search import MAXIMUM_FIELD_VALUE_LENGTH
from search import MAXIMUM_FIELDS_RETURNED_PER_SEARCH
from search import MAXIMUM_GET_INDEXES_OFFSET
from search import MAXIMUM_INDEX_NAME_LENGTH
from search import MAXIMUM_INDEXES_RETURNED_PER_GET_REQUEST
from search import MAXIMUM_NUMBER_FOUND_ACCURACY
from search import MAXIMUM_QUERY_LENGTH
from search import MAXIMUM_SEARCH_OFFSET
from search import MAXIMUM_SORTED_DOCUMENTS
from search import NumberField
from search import OperationResult
from search import PutError
from search import PutResult
from search import Query
from search import QueryError
from search import QueryOptions
from search import RANK_FIELD_NAME
from search import RescoringMatchScorer
from search import SCORE_FIELD_NAME
from search import ScoredDocument
from search import SearchResults
from search import SortExpression
from search import SortOptions
from search import TextField
from search import TIMESTAMP_FIELD_NAME
from search import TransientError
 | 
	mit | 1,058,798,410,770,367,900 | 32.986667 | 74 | 0.830129 | false | 
| 
	shakamunyi/neutron | 
	neutron/db/metering/metering_rpc.py | 
	46 | 
	2075 | 
	# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging
from neutron.common import constants as consts
from neutron.common import utils
from neutron.i18n import _LE
from neutron import manager
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
class MeteringRpcCallbacks(object):
    target = oslo_messaging.Target(version='1.0')
    def __init__(self, meter_plugin):
        self.meter_plugin = meter_plugin
    def get_sync_data_metering(self, context, **kwargs):
        l3_plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not l3_plugin:
            return
        host = kwargs.get('host')
        if not utils.is_extension_supported(
            l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host:
            return self.meter_plugin.get_sync_data_metering(context)
        else:
            agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
            if not agents:
                LOG.error(_LE('Unable to find agent %s.'), host)
                return
            routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
            router_ids = [router['id'] for router in routers['routers']]
            if not router_ids:
                return
        return self.meter_plugin.get_sync_data_metering(context,
                                                        router_ids=router_ids)
 | 
	apache-2.0 | -1,353,348,655,059,363,800 | 36.053571 | 79 | 0.660241 | false | 
| 
	ElephoneApp/kubernetes | 
	cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py | 
	105 | 
	4115 | 
	#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
from path import Path
import pytest
import sys
# Munge the python path so we can find our hook code
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
# Import the modules from the hook
import install
class TestInstallHook():
    @patch('install.path')
    def test_update_rc_files(self, pmock):
        """
        Test happy path on updating env files. Assuming everything
        exists and is in place.
        """
        pmock.return_value.lines.return_value = ['line1', 'line2']
        install.update_rc_files(['test1', 'test2'])
        pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
                                                           'test1', 'test2'])
    def test_update_rc_files_with_nonexistant_path(self):
        """
        Test an unhappy path if the bashrc/users do not exist.
        """
        with pytest.raises(OSError) as exinfo:
            install.update_rc_files(['test1','test2'])
    @patch('install.fetch')
    @patch('install.hookenv')
    def test_package_installation(self, hemock, ftmock):
        """
        Verify we are calling the known essentials to build and syndicate
        kubes.
        """
        pkgs = ['build-essential', 'git',
                'make', 'nginx', 'python-pip']
        install.install_packages()
        hemock.log.assert_called_with('Installing Debian packages')
        ftmock.filter_installed_packages.assert_called_with(pkgs)
    @patch('install.archiveurl.ArchiveUrlFetchHandler')
    def test_go_download(self, aumock):
        """
            Test that we are actually handing off to charm-helpers to
            download a specific archive of Go. This is non-configurable so
            it's reasonably safe to assume we're going to always do this,
            and when it changes we shall curse the brittleness of this test.
        """
        ins_mock = aumock.return_value.install
        install.download_go()
        url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
        sha1='5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
        ins_mock.assert_called_with(url, '/usr/local', sha1, 'sha1')
    @patch('install.subprocess')
    def test_clone_repository(self, spmock):
        """
         We're not using a unit-tested git library - so ensure our subprocess
         call is consistent. If we change this, we want to know we've broken it.
        """
        install.clone_repository()
        repo = 'https://github.com/GoogleCloudPlatform/kubernetes.git'
        direct = '/opt/kubernetes'
        spmock.check_output.assert_called_with(['git', 'clone', repo, direct])
    @patch('install.install_packages')
    @patch('install.download_go')
    @patch('install.clone_repository')
    @patch('install.update_rc_files')
    @patch('install.hookenv')
    def test_install_main(self, hemock, urmock, crmock, dgmock, ipmock):
        """
        Ensure the driver/main method is calling all the supporting methods.
        """
        strings = [
        'export GOROOT=/usr/local/go\n',
        'export PATH=$PATH:$GOROOT/bin\n',
        'export KUBE_MASTER_IP=0.0.0.0\n',
        'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
        ]
        install.install()
        crmock.assert_called_once()
        dgmock.assert_called_once()
        crmock.assert_called_once()
        urmock.assert_called_with(strings)
        hemock.open_port.assert_called_with(8080)
 | 
	apache-2.0 | -4,471,815,432,505,795,600 | 37.101852 | 80 | 0.641555 | false | 
| 
	cedadev/cis | 
	cis/test/integration/test_eval.py | 
	3 | 
	7743 | 
	import netCDF4
import numpy
from hamcrest import assert_that, is_
import unittest
from cis.cis_main import evaluate_cmd, col_cmd
from cis.test.integration.base_integration_test import BaseIntegrationTest
from cis.test.integration_test_data import *
from cis.parse import parse_args
from cis.test.unit.eval.test_calc import compare_masked_arrays
class TestEval(BaseIntegrationTest):
    def test_Aeronet_wavelength_calculation(self):
        # Example from the CIS Phase 3 Software spec:
        # ... a user should be able to write a plugin to calculate the Aeronet AOD at 550nm from the AOD at 500 nm as
        # AOD550 = AOD500 * (550/500)^(-1*Angstrom500-870)"
        # Takes 3s
        args = ['eval', 'AOT_500,500-870Angstrom=a550to870:' + escape_colons(another_valid_aeronet_filename),
                'AOT_500 * (550.0/500)**(-1*a550to870)', '1', '-o', self.OUTPUT_FILENAME]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        # Check correct:
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        calculated_result = self.ds.variables['calculated_variable'][:]
        expected_result = [0.2341039087, 0.2285401152, 0.2228799533, 0.1953746746, 0.2094051561, 0.1696889668,
                           0.3137791803, 0.2798929273, 0.1664194279, 0.1254619092, 0.1258309124, 0.1496960031,
                           0.0768447737, 0.0550896430, 0.0534543107, 0.0538315909, 0.0666742975, 0.0512935449,
                           0.0699585189, 0.0645033944]
        assert_that(calculated_result.shape, is_((3140,)))
        assert numpy.allclose(expected_result, calculated_result[0:20])
    def test_ECHAMHAM_wavelength_sum(self):
        args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
                '%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), '1', '-o', self.OUTPUT_FILENAME]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        # Check correct:
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        calculated_result = self.ds.variables['calculated_variable'][:]
        # A hand calculated selection of values
        expected_result = [0.007633533, 0.007646653, 0.007749859, 0.007744226, 0.007761176]
        assert_that(calculated_result.shape, is_((96, 192)))
        assert numpy.allclose(expected_result, calculated_result[:][0][0:5])
    def test_collocated_NetCDF_Gridded_onto_GASSP(self):
        # First do a collocation of ECHAMHAM onto GASSP
        vars = valid_echamham_variable_1, valid_echamham_variable_2
        filename = escape_colons(valid_echamham_filename)
        sample_file = escape_colons(valid_GASSP_aeroplane_filename)
        sample_var = valid_GASSP_aeroplane_variable
        collocator_and_opts = 'nn[missing_data_for_missing_sample=True],variable=%s' % sample_var
        arguments = ['col', ",".join(vars) + ':' + filename,
                     sample_file + ':collocator=' + collocator_and_opts,
                     '-o', 'collocated_gassp']
        main_arguments = parse_args(arguments)
        col_cmd(main_arguments)
        # Check collocation is the same
        self.ds = netCDF4.Dataset('collocated_gassp.nc')
        col_var1 = self.ds.variables[valid_echamham_variable_1][:]
        col_var2 = self.ds.variables[valid_echamham_variable_2][:]
        # A hand calculated selection of values
        expected_col1 = numpy.ma.masked_invalid(
            [float('Nan'), float('Nan'), float('Nan'), 0.0814601778984, 0.0814601778984])
        compare_masked_arrays(expected_col1, col_var1[:][0:5])
        expected_col2 = numpy.ma.masked_invalid(
            [float('Nan'), float('Nan'), float('Nan'), 0.0741240680218, 0.0741240680218])
        compare_masked_arrays(expected_col2, col_var2[:][0:5])
        # Then do an evaluation using the collocated data:
        args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2,
                                      'collocated_gassp.nc'),
                "%s=gassp_alias:%s" % (valid_GASSP_aeroplane_variable, escape_colons(valid_GASSP_aeroplane_filename)),
                "(%s + %s) / gassp_alias " % (valid_echamham_variable_1, valid_echamham_variable_2),
                '1', '-o', self.OUTPUT_FILENAME]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds.close()
        # Check correct
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        calculated_result = self.ds.variables['calculated_variable'][:]
        # A hand calculated selection of values
        expected_result = numpy.ma.masked_invalid([0.00196121983491, 0.00197255626472, 0.00120850731992])
        assert_that(calculated_result.shape, is_((311,)))
        # Check the first 3 valid values
        compare_masked_arrays(expected_result, calculated_result[:][10:13])
        os.remove('collocated_gassp.nc')
    @skip_pyhdf
    def test_CloudSat(self):
        args = ['eval', "%s,%s:%s" % (valid_cloudsat_RVOD_sdata_variable, valid_cloudsat_RVOD_vdata_variable,
                                      escape_colons(valid_cloudsat_RVOD_file)),
                '%s/%s' % (valid_cloudsat_RVOD_sdata_variable, valid_cloudsat_RVOD_vdata_variable), 'ppm', '-o',
                'cloudsat_var:' + self.OUTPUT_FILENAME]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        assert_that(self.ds.variables['cloudsat_var'].units, is_('ppm'))
    def test_can_specify_output_variable(self):
        args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
                '%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
                '-o', 'var_out:' + self.OUTPUT_FILENAME]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        assert 'var_out' in self.ds.variables
    def test_can_specify_attributes_gridded(self):
        args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
                '%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
                '-o', 'var_out:' + self.OUTPUT_FILENAME, '-a', 'att1=val1,att2=val2']
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        assert_that(self.ds.variables['var_out'].att1, is_('val1'))
        assert_that(self.ds.variables['var_out'].att2, is_('val2'))
    def test_can_specify_units_gridded(self):
        args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
                '%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
                '-o', 'var_out:' + self.OUTPUT_FILENAME, '-a', 'att1=val1,att2=val2']
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        assert_that(self.ds.variables['var_out'].units, is_('kg m^-3'))
    def test_can_specify_units_gridded_no_output_var(self):
        args = ['eval', "%s:%s" % (valid_hadgem_variable, escape_colons(valid_hadgem_filename)), "od550aer", "ppm", "-o",
                self.OUTPUT_FILENAME, "-a", "att1=val1"]
        arguments = parse_args(args)
        evaluate_cmd(arguments)
        self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
        assert_that(self.ds.variables['calculated_variable'].units, is_('ppm'))
        assert_that(self.ds.variables['calculated_variable'].att1, is_('val1'))
 | 
	lgpl-3.0 | 4,635,982,652,704,962,000 | 51.673469 | 132 | 0.62818 | false | 
| 
	diego-d5000/MisValesMd | 
	env/lib/python2.7/site-packages/django/template/loaders/app_directories.py | 
	1 | 
	1602 | 
	"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import io
from django.core.exceptions import SuspiciousFileOperation
from django.template.base import TemplateDoesNotExist
from django.template.utils import get_app_template_dirs
from django.utils._os import safe_join
from .base import Loader as BaseLoader
class Loader(BaseLoader):
    is_usable = True
    def get_template_sources(self, template_name, template_dirs=None):
        """
        Returns the absolute paths to "template_name", when appended to each
        directory in "template_dirs". Any paths that don't lie inside one of the
        template dirs are excluded from the result set, for security reasons.
        """
        if not template_dirs:
            template_dirs = get_app_template_dirs('templates')
        for template_dir in template_dirs:
            try:
                yield safe_join(template_dir, template_name)
            except SuspiciousFileOperation:
                # The joined path was located outside of this template_dir
                # (it might be inside another one, so this isn't fatal).
                pass
    def load_template_source(self, template_name, template_dirs=None):
        for filepath in self.get_template_sources(template_name, template_dirs):
            try:
                with io.open(filepath, encoding=self.engine.file_charset) as fp:
                    return fp.read(), filepath
            except IOError:
                pass
        raise TemplateDoesNotExist(template_name)
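# Hedged configuration sketch: a loader like this is normally selected through
# settings rather than instantiated directly. The layout below assumes a
# Django 1.8-era TEMPLATES setting and is illustrative only.
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {'loaders': [
#         'django.template.loaders.filesystem.Loader',
#         'django.template.loaders.app_directories.Loader',
#     ]},
# }]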
 | 
	mit | 3,960,476,586,008,162,300 | 36.142857 | 80 | 0.642322 | false | 
| 
	beatrizjesus/my-first-blog | 
	pasta/Lib/site-packages/django/contrib/redirects/migrations/0001_initial.py | 
	142 | 
	1271 | 
	# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Redirect',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('site', models.ForeignKey(to='sites.Site', to_field='id')),
                ('old_path', models.CharField(help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.", max_length=200, verbose_name='redirect from', db_index=True)),
                ('new_path', models.CharField(help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.", max_length=200, verbose_name='redirect to', blank=True)),
            ],
            options={
                'ordering': ('old_path',),
                'unique_together': set([('site', 'old_path')]),
                'db_table': 'django_redirect',
                'verbose_name': 'redirect',
                'verbose_name_plural': 'redirects',
            },
            bases=(models.Model,),
        ),
    ]
 | 
	mit | -7,705,647,044,940,095,000 | 40 | 209 | 0.556255 | false | 
| 
	denisenkom/django | 
	tests/bulk_create/tests.py | 
	5 | 
	6664 | 
	from __future__ import unicode_literals
from operator import attrgetter
from django.db import connection
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import override_settings
from .models import Country, Restaurant, Pizzeria, State, TwoFields
class BulkCreateTests(TestCase):
    def setUp(self):
        self.data = [
            Country(name="United States of America", iso_two_letter="US"),
            Country(name="The Netherlands", iso_two_letter="NL"),
            Country(name="Germany", iso_two_letter="DE"),
            Country(name="Czech Republic", iso_two_letter="CZ")
        ]
    def test_simple(self):
        created = Country.objects.bulk_create(self.data)
        self.assertEqual(len(created), 4)
        self.assertQuerysetEqual(Country.objects.order_by("-name"), [
            "United States of America", "The Netherlands", "Germany", "Czech Republic"
        ], attrgetter("name"))
        created = Country.objects.bulk_create([])
        self.assertEqual(created, [])
        self.assertEqual(Country.objects.count(), 4)
    @skipUnlessDBFeature('has_bulk_insert')
    def test_efficiency(self):
        with self.assertNumQueries(1):
            Country.objects.bulk_create(self.data)
    def test_inheritance(self):
        Restaurant.objects.bulk_create([
            Restaurant(name="Nicholas's")
        ])
        self.assertQuerysetEqual(Restaurant.objects.all(), [
            "Nicholas's",
        ], attrgetter("name"))
        with self.assertRaises(ValueError):
            Pizzeria.objects.bulk_create([
                Pizzeria(name="The Art of Pizza")
            ])
        self.assertQuerysetEqual(Pizzeria.objects.all(), [])
        self.assertQuerysetEqual(Restaurant.objects.all(), [
            "Nicholas's",
        ], attrgetter("name"))
    def test_non_auto_increment_pk(self):
        State.objects.bulk_create([
            State(two_letter_code=s)
            for s in ["IL", "NY", "CA", "ME"]
        ])
        self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
            "CA", "IL", "ME", "NY",
        ], attrgetter("two_letter_code"))
    @skipUnlessDBFeature('has_bulk_insert')
    def test_non_auto_increment_pk_efficiency(self):
        with self.assertNumQueries(1):
            State.objects.bulk_create([
                State(two_letter_code=s)
                for s in ["IL", "NY", "CA", "ME"]
            ])
        self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
            "CA", "IL", "ME", "NY",
        ], attrgetter("two_letter_code"))
    @skipIfDBFeature('allows_primary_key_0')
    def test_zero_as_autoval(self):
        """
        Zero as the id for an AutoField should raise an exception in MySQL,
        because MySQL does not allow zero as an automatic primary key.
        """
        valid_country = Country(name='Germany', iso_two_letter='DE')
        invalid_country = Country(id=0, name='Poland', iso_two_letter='PL')
        with self.assertRaises(ValueError):
            Country.objects.bulk_create([valid_country, invalid_country])
    def test_batch_same_vals(self):
        # Sqlite had a problem where all the same-valued models were
        # collapsed to one insert.
        Restaurant.objects.bulk_create([
            Restaurant(name='foo') for i in range(0, 2)
        ])
        self.assertEqual(Restaurant.objects.count(), 2)
    def test_large_batch(self):
        with override_settings(DEBUG=True):
            connection.queries = []
            TwoFields.objects.bulk_create([
                TwoFields(f1=i, f2=i+1) for i in range(0, 1001)
            ])
        self.assertEqual(TwoFields.objects.count(), 1001)
        self.assertEqual(
            TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(),
            101)
        self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
    @skipUnlessDBFeature('has_bulk_insert')
    def test_large_single_field_batch(self):
        # SQLite had a problem with more than 500 UNIONed selects in single
        # query.
        Restaurant.objects.bulk_create([
            Restaurant() for i in range(0, 501)
        ])
    @skipUnlessDBFeature('has_bulk_insert')
    def test_large_batch_efficiency(self):
        with override_settings(DEBUG=True):
            connection.queries = []
            TwoFields.objects.bulk_create([
                TwoFields(f1=i, f2=i+1) for i in range(0, 1001)
            ])
            self.assertTrue(len(connection.queries) < 10)
    def test_large_batch_mixed(self):
        """
        Test inserting a large batch with objects having primary key set
        mixed together with objects without PK set.
        """
        with override_settings(DEBUG=True):
            connection.queries = []
            TwoFields.objects.bulk_create([
                TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i+1)
                for i in range(100000, 101000)])
        self.assertEqual(TwoFields.objects.count(), 1000)
        # We can't assume much about the IDs created, except that the above
        # created IDs must exist.
        id_range = range(100000, 101000, 2)
        self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
        self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
    @skipUnlessDBFeature('has_bulk_insert')
    def test_large_batch_mixed_efficiency(self):
        """
        Test inserting a large batch with objects having primary key set
        mixed together with objects without PK set.
        """
        with override_settings(DEBUG=True):
            connection.queries = []
            TwoFields.objects.bulk_create([
                TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i+1)
                for i in range(100000, 101000)])
            self.assertTrue(len(connection.queries) < 10)
    def test_explicit_batch_size(self):
        objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
        TwoFields.objects.bulk_create(objs, 2)
        self.assertEqual(TwoFields.objects.count(), len(objs))
        TwoFields.objects.all().delete()
        TwoFields.objects.bulk_create(objs, len(objs))
        self.assertEqual(TwoFields.objects.count(), len(objs))
    @skipUnlessDBFeature('has_bulk_insert')
    def test_explicit_batch_size_efficiency(self):
        objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
        with self.assertNumQueries(2):
            TwoFields.objects.bulk_create(objs, 50)
        TwoFields.objects.all().delete()
        with self.assertNumQueries(1):
            TwoFields.objects.bulk_create(objs, len(objs))
 | 
	bsd-3-clause | -8,335,335,551,976,320,000 | 38.904192 | 86 | 0.607143 | false | 
| 
	Vogeltak/pauselan | 
	lib/python3.4/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py | 
	59 | 
	3387 | 
	# mysql/gaerdbms.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+gaerdbms
    :name: Google Cloud SQL
    :dbapi: rdbms
    :connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
    :url: https://developers.google.com/appengine/docs/python/cloud-sql/\
developers-guide
    This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
    minimal changes.
    .. versionadded:: 0.7.8
    .. deprecated:: 1.0 This dialect is **no longer necessary** for
        Google Cloud SQL; the MySQLdb dialect can be used directly.
        Cloud SQL now recommends creating connections via the
        mysql dialect using the URL format
        ``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections.  The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
    return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
    @classmethod
    def dbapi(cls):
        warn_deprecated(
            "Google Cloud SQL now recommends creating connections via the "
            "MySQLdb dialect directly, using the URL format "
            "mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
            "<projectid>:<instancename>"
        )
        # from django:
        # http://code.google.com/p/googleappengine/source/
        #     browse/trunk/python/google/storage/speckle/
        # python/django/backend/base.py#118
        # see also [ticket:2649]
        # see also http://stackoverflow.com/q/14224679/34549
        from google.appengine.api import apiproxy_stub_map
        if _is_dev_environment():
            from google.appengine.api import rdbms_mysqldb
            return rdbms_mysqldb
        elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
            from google.storage.speckle.python.api import rdbms_apiproxy
            return rdbms_apiproxy
        else:
            from google.storage.speckle.python.api import rdbms_googleapi
            return rdbms_googleapi
    @classmethod
    def get_pool_class(cls, url):
        # Cloud SQL connections die at any moment
        return NullPool
    def create_connect_args(self, url):
        opts = url.translate_connect_args()
        if not _is_dev_environment():
            # 'dsn' and 'instance' are because we are skipping
            # the traditional google.api.rdbms wrapper
            opts['dsn'] = ''
            opts['instance'] = url.query['instance']
        return [], opts
    def _extract_error_code(self, exception):
        match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
        # The rdbms api will wrap then re-raise some types of errors
        # making this regex return no matches.
        code = match.group(1) or match.group(2) if match else None
        if code:
            return int(code)
dialect = MySQLDialect_gaerdbms
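# Hedged usage sketch, following the deprecation note in the module docstring:
# current Cloud SQL guidance is to connect through the plain mysqldb dialect.
# The project and instance names below are placeholders, not real endpoints.
# from sqlalchemy import create_engine
# engine = create_engine(
#     'mysql+mysqldb://root@/mydb?unix_socket=/cloudsql/my-project:my-instance')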
 | 
	gpl-2.0 | -222,051,937,422,292,350 | 32.205882 | 91 | 0.658105 | false | 
| 
	numericillustration/sdc-imgapi | 
	tools/manatee-diff/manatee2images.py | 
	2 | 
	1948 | 
	#!/usr/bin/env python
"""
Take an imgapi_images-*.gz manatee table dump and emit a JSON array of images.
Usage:
    gzcat imgapi_images-2014-11-15-00-01-56.gz | ./manatee2images.py > images.json
"""
import json
import sys
import operator
from pprint import pprint
import codecs
# TODO: ideally we wouldn't hardcode types here. This should come from
# the imgapi_images bucket definition.
type_from_key = {
    'billing_tags': 'array',
    'published_at': 'string',
    'acl': 'array',
    'public': 'bool',
}
def update_img_from_index(img, entry, header, key):
    try:
        type = type_from_key[key]
        idx = header.index(key)   # cache this?
        val = entry[idx]
        # Postgres NULL
        if val == '\\N':
            if key in img:
                del img[key]
            return
        if type == 'array' and val.startswith('{') and val.endswith('}'):
            # Hack parsing of postgres arrays.
            val = [tag for tag in val[1:-1].split(',') if tag]
        elif type == 'bool':
            if val == 't':
                val = True
            elif val == 'f':
                val = False
            else:
                raise RuntimeError(
                    'unexpected index value for "%s" bool field: %r'
                    % (key, val))
        img[key] = val
    except ValueError:
        pass
header = None
published_at_idx = None
acl_idx = None
imgs = []
for line in sys.stdin:
    if header is None:
        header = json.loads(line)['keys']
        assert header[3] == '_value'
        continue
    entry = json.loads(line)['entry']
    img = json.loads(entry[3])
    # Apply some of the index values.
    # TODO: eventually should do all of these
    for key in ['billing_tags', 'published_at', 'acl', 'public']:
        update_img_from_index(img, entry, header, key)
    imgs.append(img)
imgs.sort(key=operator.itemgetter('uuid'))
print json.dumps(imgs, sort_keys=True, indent=4)
 | 
	mpl-2.0 | 3,893,063,082,592,945,000 | 23.974359 | 82 | 0.563655 | false | 
| 
	lancezlin/ml_template_py | 
	lib/python2.7/site-packages/wheel/metadata.py | 
	93 | 
	11676 | 
	"""
Tools for converting old- to new-style metadata.
"""
from collections import namedtuple
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
try:
    from collections import OrderedDict
except ImportError:
    OrderedDict = dict
import re
import os.path
import textwrap
import pkg_resources
import email.parser
from . import __version__ as wheel_version
METADATA_VERSION = "2.0"
PLURAL_FIELDS = { "classifier" : "classifiers",
                  "provides_dist" : "provides",
                  "provides_extra" : "extras" }
SKIP_FIELDS = set()
CONTACT_FIELDS = (({"email":"author_email", "name": "author"},
                    "author"),
                  ({"email":"maintainer_email", "name": "maintainer"},
                    "maintainer"))
# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page",
                      "license"))
# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
KEYWORDS_RE = re.compile("[\0-,]+")
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
def unique(iterable):
    """
    Yield unique values in iterable, preserving order.
    """
    seen = set()
    for value in iterable:
        if not value in seen:
            seen.add(value)
            yield value
def handle_requires(metadata, pkg_info, key):
    """
    Place the runtime requirements from pkg_info into metadata.
    """
    may_requires = OrderedDefaultDict(list)
    for value in sorted(pkg_info.get_all(key)):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            if condition.endswith(' and '):
                condition = condition[:-5]
        else:
            condition, extra = None, None
            package = value
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)
    if may_requires:
        metadata['run_requires'] = []
        def sort_key(item):
            # Both condition and extra could be None, which can't be compared
            # against strings in Python 3.
            key, value = item
            if key.condition is None:
                return ''
            return key.condition
        for key, value in sorted(may_requires.items(), key=sort_key):
            may_requirement = OrderedDict((('requires', value),))
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)
        if not 'extras' in metadata:
            metadata['extras'] = []
        metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
    """
    Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.
    The description is included under the key ['description'] rather than
    being written to a separate file.
    path: path to PKG-INFO file
    distribution: optional distutils Distribution()
    """
    metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
    metadata["generator"] = "bdist_wheel (" + wheel_version + ")"
    try:
        unicode
        pkg_info = read_pkg_info(path)
    except NameError:
        with open(path, 'rb') as pkg_info_file:
            pkg_info = email.parser.Parser().parsestr(pkg_info_file.read().decode('utf-8'))
    description = None
    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']
    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        payload = pkg_info.get_payload()
        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')
        if payload:
            description = payload
    if description:
        pkg_info['description'] = description
    for key in sorted(unique(k.lower() for k in pkg_info.keys())):
        low_key = key.replace('-', '_')
        if low_key in SKIP_FIELDS:
            continue
        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue
        if low_key in sorted(PLURAL_FIELDS):
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)
        elif low_key == 'provides_extra':
            if not 'extras' in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))
        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}
        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
        else:
            metadata[low_key] = pkg_info[key]
    metadata['metadata_version'] = METADATA_VERSION
    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))
    # include more information if distribution is available
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = sorted(convert_requirements(requirements))
                    metadata[requires] = [{'requires':new_requirements}]
            except AttributeError:
                pass
    # handle contacts
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = OrderedDict()
        for key in sorted(contact_type):
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts
    # convert entry points to exports
    try:
        with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = OrderedDict()
        for group, items in sorted(ep_map.items()):
            exports[group] = OrderedDict()
            for item in sorted(map(str, items.values())):
                name, export = item.split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        pass
    # copy console_scripts entry points to commands
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]
    return metadata
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion."""
    requires_dist = []
    for op, ver in requirement.specs:
        requires_dist.append(op + ver)
    if not requires_dist:
        return ''
    return " (%s)" % ','.join(requires_dist)
def convert_requirements(requirements):
    """Yield Requires-Dist: strings for parsed requirements strings."""
    for req in requirements:
        parsed_requirement = pkg_resources.Requirement.parse(req)
        spec = requires_to_requires_dist(parsed_requirement)
        extras = ",".join(parsed_requirement.extras)
        if extras:
            extras = "[%s]" % extras
        yield (parsed_requirement.project_name + extras + spec)
def generate_requirements(extras_require):
    """
    Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
    and ('Provides-Extra', 'extra') tuples.
    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
    using the empty extra {'': [requirements]} to hold install_requires.
    """
    for extra, depends in extras_require.items():
        condition = ''
        if extra and ':' in extra:  # setuptools extra:condition syntax
            extra, condition = extra.split(':', 1)
            extra = pkg_resources.safe_extra(extra)
        if extra:
            yield ('Provides-Extra', extra)
            if condition:
                condition += " and "
            condition += "extra == '%s'" % extra
        if condition:
            condition = '; ' + condition
        for new_req in convert_requirements(depends):
            yield ('Requires-Dist', new_req + condition)
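# Hedged example of the conversion above (the package name 'mock' and the
# environment marker are illustrative assumptions):
#   list(generate_requirements({'test:python_version<"3"': ['mock']}))
#   == [('Provides-Extra', 'test'),
#       ('Requires-Dist', 'mock; python_version<"3" and extra == \'test\'')]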
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
    old-draft Metadata 2.0 format.
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        for extra, reqs in sorted(pkg_resources.split_sections(requires),
                                  key=lambda x: x[0] or ''):
            for item in generate_requirements({extra: reqs}):
                pkg_info[item[0]] = item[1]
    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']
    return pkg_info
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+"""
    text = pkg_info[field]
    field = field.lower()
    if not isinstance(text, str):
        if not hasattr(pkg_info, 'raw_items'):  # Python 3.2
            return str(text)
        for item in pkg_info.raw_items():
            if item[0].lower() == field:
                text = item[1].encode('ascii', 'surrogateescape')\
                                      .decode('utf-8')
                break
    return text
def dedent_description(pkg_info):
    """
    Dedent and convert pkg_info['Description'] to Unicode.
    """
    description = pkg_info['Description']
    # Python 3 Unicode handling, sorta.
    surrogates = False
    if not isinstance(description, str):
        surrogates = True
        description = pkginfo_unicode(pkg_info, 'Description')
    description_lines = description.splitlines()
    description_dedent = '\n'.join(
            # if the first line of long_description is blank,
            # the first line here will be indented.
            (description_lines[0].lstrip(),
             textwrap.dedent('\n'.join(description_lines[1:])),
             '\n'))
    if surrogates:
        description_dedent = description_dedent\
                .encode("utf8")\
                .decode("ascii", "surrogateescape")
    return description_dedent
if __name__ == "__main__":
    import sys, pprint
    pprint.pprint(pkginfo_to_dict(sys.argv[1]))
 | 
	mit | -7,892,011,049,403,554,000 | 34.063063 | 102 | 0.593011 | false | 
| 
	nicholaschris/landsatpy | 
	stuff.py | 
	1 | 
	1864 | 
	import cloud_detection_new as cloud_detection
from matplotlib import pyplot as plt
import views
from skimage import exposure
nir = cloud_detection.get_nir()[0:600,2000:2600]
red = cloud_detection.get_red()[0:600,2000:2600]
green = cloud_detection.get_green()[0:600,2000:2600]
blue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal
coastal = cloud_detection.get_coastal()[0:600,2000:2600]
marine_shadow_index = (green-blue)/(green+blue)
img = views.create_composite(red, green, blue)
img_rescale = exposure.rescale_intensity(img, in_range=(0, 90))
plt.rcParams['savefig.facecolor'] = "0.8"
vmin, vmax = 0.0, 0.1
def example_plot(ax, data, fontsize=12):
     ax.imshow(data, vmin=vmin, vmax=vmax)
     ax.locator_params(nbins=3)
     ax.set_xlabel('x-label', fontsize=fontsize)
     ax.set_ylabel('y-label', fontsize=fontsize)
     ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig = plt.figure()
ax1=plt.subplot(243)
ax2=plt.subplot(244)
ax3=plt.subplot(247)
ax4=plt.subplot(248)
ax5=plt.subplot(121)
a_coastal = coastal[500:600, 500:600]
a_blue = blue[500:600, 500:600]
a_green = green[500:600, 500:600]
a_red = red[500:600, 500:600]
a_nir = nir[500:600, 500:600]
a_img = img[500:600, 500:600]
spec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]
b_coastal = coastal[200:300, 100:200]
b_blue = blue[200:300, 100:200]
b_green = green[200:300, 100:200]
b_red = red[200:300, 100:200]
b_nir = nir[200:300, 100:200]
b_img = img[200:300, 100:200]
example_plot(ax1, coastal)
example_plot(ax2, blue)
example_plot(ax3, green)
example_plot(ax4, red)
ax5.imshow(img)
# plt.tight_layout()
plt.close('all')
spec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]
plt.plot(spec, 'k*-')
plt.plot(spec1, 'k.-')
plt.close('all')
cbg = (coastal+blue+green)/3
plt.imshow(cbg/red) | 
	mit | -3,928,246,446,012,056,600 | 27.692308 | 90 | 0.689914 | false | 
| 
	koyuawsmbrtn/eclock | 
	windows/kivy/kivy/core/image/img_dds.py | 
	54 | 
	1048 | 
	'''
DDS: DDS image loader
'''
__all__ = ('ImageLoaderDDS', )
from kivy.lib.ddsfile import DDSFile
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderDDS(ImageLoaderBase):
    @staticmethod
    def extensions():
        return ('dds', )
    def load(self, filename):
        try:
            dds = DDSFile(filename=filename)
        except:
            Logger.warning('Image: Unable to load image <%s>' % filename)
            raise
        self.filename = filename
        width, height = dds.size
        im = ImageData(width, height, dds.dxt, dds.images[0], source=filename,
                       flip_vertical=False)
        if len(dds.images) > 1:
            images = dds.images
            images_size = dds.images_size
            for index in range(1, len(dds.images)):
                w, h = images_size[index]
                data = images[index]
                im.add_mipmap(index, w, h, data)
        return [im]
# register
ImageLoader.register(ImageLoaderDDS)
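# Illustrative usage sketch (added for clarity; not part of the original
# module). Once registered, the loader is picked up automatically when a
# .dds file is opened through the regular Kivy core image API; the file
# name below is a hypothetical placeholder:
#
#     from kivy.core.image import Image as CoreImage
#     texture = CoreImage('mipmapped_texture.dds').texture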
 | 
	gpl-2.0 | -1,542,359,741,957,030,400 | 25.871795 | 78 | 0.583015 | false | 
| 
	dudonwai/dudonsblog | 
	Lib/site-packages/django/contrib/gis/db/models/sql/conversion.py | 
	308 | 
	2015 | 
	"""
This module holds simple classes to convert geospatial values from the
database.
"""
from django.contrib.gis.db.models.fields import GeoSelectFormatMixin
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class BaseField(object):
    empty_strings_allowed = True
    def get_db_converters(self, connection):
        return [self.from_db_value]
    def select_format(self, compiler, sql, params):
        return sql, params
class AreaField(BaseField):
    "Wrapper for Area values."
    def __init__(self, area_att):
        self.area_att = area_att
    def from_db_value(self, value, expression, connection, context):
        if value is not None:
            value = Area(**{self.area_att: value})
        return value
    def get_internal_type(self):
        return 'AreaField'
class DistanceField(BaseField):
    "Wrapper for Distance values."
    def __init__(self, distance_att):
        self.distance_att = distance_att
    def from_db_value(self, value, expression, connection, context):
        if value is not None:
            value = Distance(**{self.distance_att: value})
        return value
    def get_internal_type(self):
        return 'DistanceField'
class GeomField(GeoSelectFormatMixin, BaseField):
    """
    Wrapper for Geometry values.  It is a lightweight alternative to
    using GeometryField (which requires an SQL query upon instantiation).
    """
    # Hacky marker for get_db_converters()
    geom_type = None
    def from_db_value(self, value, expression, connection, context):
        if value is not None:
            value = Geometry(value)
        return value
    def get_internal_type(self):
        return 'GeometryField'
class GMLField(BaseField):
    """
    Wrapper for GML to be used by Oracle to ensure Database.LOB conversion.
    """
    def get_internal_type(self):
        return 'GMLField'
    def from_db_value(self, value, expression, connection, context):
        return value
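# Illustrative sketch (added for clarity; not part of the original module).
# The wrappers above only convert raw database values into measure/geometry
# objects, e.g. for a distance column expressed in metres:
#
#     field = DistanceField('m')
#     field.from_db_value(1500.0, None, None, None)   # -> Distance(m=1500.0)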
 | 
	mit | -1,638,714,960,052,173,300 | 25.866667 | 75 | 0.667494 | false | 
| 
	vedujoshi/tempest | 
	tempest/api/identity/base.py | 
	2 | 
	11762 | 
	# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
import tempest.test
CONF = config.CONF
class BaseIdentityTest(tempest.test.BaseTestCase):
    @classmethod
    def setup_credentials(cls):
        # Create no network resources for these tests.
        cls.set_network_resources()
        super(BaseIdentityTest, cls).setup_credentials()
    @classmethod
    def disable_user(cls, user_name):
        user = cls.get_user_by_name(user_name)
        cls.users_client.update_user_enabled(user['id'], enabled=False)
    @classmethod
    def disable_tenant(cls, tenant_name):
        tenant = cls.get_tenant_by_name(tenant_name)
        cls.tenants_client.update_tenant(tenant['id'], enabled=False)
    @classmethod
    def get_user_by_name(cls, name, domain_id=None):
        if domain_id:
            params = {'domain_id': domain_id}
            users = cls.users_client.list_users(**params)['users']
        else:
            users = cls.users_client.list_users()['users']
        user = [u for u in users if u['name'] == name]
        if user:
            return user[0]
    @classmethod
    def get_tenant_by_name(cls, name):
        try:
            tenants = cls.tenants_client.list_tenants()['tenants']
        except AttributeError:
            tenants = cls.projects_client.list_projects()['projects']
        tenant = [t for t in tenants if t['name'] == name]
        if tenant:
            return tenant[0]
    @classmethod
    def get_role_by_name(cls, name):
        roles = cls.roles_client.list_roles()['roles']
        role = [r for r in roles if r['name'] == name]
        if role:
            return role[0]
    def create_test_user(self, **kwargs):
        if kwargs.get('password', None) is None:
            kwargs['password'] = data_utils.rand_password()
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name('test_user')
        if 'email' not in kwargs:
            kwargs['email'] = kwargs['name'] + '@testmail.tm'
        user = self.users_client.create_user(**kwargs)['user']
        # Delete the user at the end of the test
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.users_client.delete_user, user['id'])
        return user
    def setup_test_role(self, name=None, domain_id=None):
        """Set up a test role."""
        params = {'name': name or data_utils.rand_name('test_role')}
        if domain_id:
            params['domain_id'] = domain_id
        role = self.roles_client.create_role(**params)['role']
        # Delete the role at the end of the test
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.roles_client.delete_role, role['id'])
        return role
class BaseIdentityV2Test(BaseIdentityTest):
    credentials = ['primary']
    # identity v2 tests should obtain tokens and create accounts via v2
    # regardless of the configured CONF.identity.auth_version
    identity_version = 'v2'
    @classmethod
    def setup_clients(cls):
        super(BaseIdentityV2Test, cls).setup_clients()
        cls.non_admin_client = cls.os_primary.identity_public_client
        cls.non_admin_token_client = cls.os_primary.token_client
        cls.non_admin_tenants_client = cls.os_primary.tenants_public_client
        cls.non_admin_users_client = cls.os_primary.users_public_client
class BaseIdentityV2AdminTest(BaseIdentityV2Test):
    credentials = ['primary', 'admin']
    # NOTE(andreaf) Identity tests work with credentials, so it is safer
    # for them to always use disposable credentials. Forcing dynamic creds
    # on regular identity tests would however be too restrictive, since it
    # would prevent any identity test from being executed against clouds where
    # admin credentials are not available.
    # Since all admin tests require admin credentials to be executed,
    # this will not impact the ability to execute tests.
    force_tenant_isolation = True
    @classmethod
    def skip_checks(cls):
        super(BaseIdentityV2AdminTest, cls).skip_checks()
        if not CONF.identity_feature_enabled.api_v2_admin:
            raise cls.skipException('Identity v2 admin not available')
    @classmethod
    def setup_clients(cls):
        super(BaseIdentityV2AdminTest, cls).setup_clients()
        cls.client = cls.os_admin.identity_client
        cls.non_admin_client = cls.os_primary.identity_client
        cls.token_client = cls.os_admin.token_client
        cls.tenants_client = cls.os_admin.tenants_client
        cls.non_admin_tenants_client = cls.os_primary.tenants_client
        cls.roles_client = cls.os_admin.roles_client
        cls.non_admin_roles_client = cls.os_primary.roles_client
        cls.users_client = cls.os_admin.users_client
        cls.non_admin_users_client = cls.os_primary.users_client
        cls.services_client = cls.os_admin.identity_services_client
        cls.endpoints_client = cls.os_admin.endpoints_client
    @classmethod
    def resource_setup(cls):
        super(BaseIdentityV2AdminTest, cls).resource_setup()
        cls.projects_client = cls.tenants_client
    def setup_test_user(self, password=None):
        """Set up a test user."""
        tenant = self.setup_test_tenant()
        user = self.create_test_user(tenantId=tenant['id'], password=password)
        return user
    def setup_test_tenant(self, **kwargs):
        """Set up a test tenant."""
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name('test_tenant')
        if 'description' not in kwargs:
            kwargs['description'] = data_utils.rand_name('desc')
        tenant = self.projects_client.create_tenant(**kwargs)['tenant']
        # Delete the tenant at the end of the test
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.tenants_client.delete_tenant, tenant['id'])
        return tenant
class BaseIdentityV3Test(BaseIdentityTest):
    credentials = ['primary']
    # identity v3 tests should obtain tokens and create accounts via v3
    # regardless of the configured CONF.identity.auth_version
    identity_version = 'v3'
    @classmethod
    def setup_clients(cls):
        super(BaseIdentityV3Test, cls).setup_clients()
        cls.non_admin_client = cls.os_primary.identity_v3_client
        cls.non_admin_users_client = cls.os_primary.users_v3_client
        cls.non_admin_token = cls.os_primary.token_v3_client
        cls.non_admin_projects_client = cls.os_primary.projects_client
        cls.non_admin_catalog_client = cls.os_primary.catalog_client
        cls.non_admin_versions_client =\
            cls.os_primary.identity_versions_v3_client
class BaseIdentityV3AdminTest(BaseIdentityV3Test):
    credentials = ['primary', 'admin']
    # NOTE(andreaf) Identity tests work with credentials, so it is safer
    # for them to always use disposable credentials. Forcing dynamic creds
    # on regular identity tests would however be too restrictive, since it
    # would prevent any identity test from being executed against clouds where
    # admin credentials are not available.
    # Since all admin tests require admin credentials to be executed,
    # this will not impact the ability to execute tests.
    force_tenant_isolation = True
    @classmethod
    def setup_clients(cls):
        super(BaseIdentityV3AdminTest, cls).setup_clients()
        cls.client = cls.os_admin.identity_v3_client
        cls.domains_client = cls.os_admin.domains_client
        cls.users_client = cls.os_admin.users_v3_client
        cls.trusts_client = cls.os_admin.trusts_client
        cls.roles_client = cls.os_admin.roles_v3_client
        cls.inherited_roles_client = cls.os_admin.inherited_roles_client
        cls.token = cls.os_admin.token_v3_client
        cls.endpoints_client = cls.os_admin.endpoints_v3_client
        cls.regions_client = cls.os_admin.regions_client
        cls.services_client = cls.os_admin.identity_services_v3_client
        cls.policies_client = cls.os_admin.policies_client
        cls.creds_client = cls.os_admin.credentials_client
        cls.groups_client = cls.os_admin.groups_client
        cls.projects_client = cls.os_admin.projects_client
        cls.role_assignments = cls.os_admin.role_assignments_client
        cls.oauth_consumers_client = cls.os_admin.oauth_consumers_client
        cls.oauth_token_client = cls.os_admin.oauth_token_client
        cls.domain_config_client = cls.os_admin.domain_config_client
        cls.endpoint_filter_client = cls.os_admin.endpoint_filter_client
        cls.endpoint_groups_client = cls.os_admin.endpoint_groups_client
        if CONF.identity.admin_domain_scope:
            # NOTE(andreaf) When keystone policy requires it, the identity
            # admin clients for these tests shall use 'domain' scoped tokens.
            # As the client manager is already created by the base class,
            # we set the scope for the inner auth provider.
            cls.os_admin.auth_provider.scope = 'domain'
    @classmethod
    def disable_user(cls, user_name, domain_id=None):
        user = cls.get_user_by_name(user_name, domain_id)
        cls.users_client.update_user(user['id'], name=user_name, enabled=False)
    @classmethod
    def create_domain(cls, **kwargs):
        """Create a domain."""
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name('test_domain')
        if 'description' not in kwargs:
            kwargs['description'] = data_utils.rand_name('desc')
        domain = cls.domains_client.create_domain(**kwargs)['domain']
        return domain
    def delete_domain(self, domain_id):
        # NOTE(mpavlase) It is necessary to disable the domain before deleting
        # otherwise it raises Forbidden exception
        self.domains_client.update_domain(domain_id, enabled=False)
        self.domains_client.delete_domain(domain_id)
    def setup_test_user(self, password=None):
        """Set up a test user."""
        project = self.setup_test_project()
        user = self.create_test_user(project_id=project['id'],
                                     password=password)
        return user
    def setup_test_project(self, **kwargs):
        """Set up a test project."""
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name('test_project')
        if 'description' not in kwargs:
            kwargs['description'] = data_utils.rand_name('test_description')
        project = self.projects_client.create_project(**kwargs)['project']
        # Delete the project at the end of the test
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.projects_client.delete_project, project['id'])
        return project
    def setup_test_domain(self):
        """Set up a test domain."""
        domain = self.create_domain()
        # Delete the domain at the end of the test
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.delete_domain, domain['id'])
        return domain
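# Illustrative sketch (added for clarity; not part of the original module).
# A concrete test case would typically build on the helpers above, e.g.:
#
#     class ProjectsExampleTest(BaseIdentityV3AdminTest):
#
#         def test_create_project(self):
#             project = self.setup_test_project(description='example')
#             self.assertIn('id', project)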
 | 
	apache-2.0 | 2,494,243,205,311,572,500 | 39.982578 | 79 | 0.655501 | false | 
| 
	JamesShaeffer/QGIS | 
	tests/src/python/test_qgsmessagelog.py | 
	30 | 
	3449 | 
	# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMessageLog.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/06/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis  # NOQA
from qgis.core import (Qgis,
                       QgsApplication,
                       QgsMessageLog,
                       QgsMessageLogNotifyBlocker)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMessageLog(unittest.TestCase):
    def testSignals(self):
        app_log = QgsApplication.messageLog()
        # signals should be emitted by application log
        app_spy = QSignalSpy(app_log.messageReceived)
        app_spy_received = QSignalSpy(app_log.messageReceived[bool])
        QgsMessageLog.logMessage('test', 'tag', Qgis.Info, notifyUser=True)
        self.assertEqual(len(app_spy), 1)
        self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Info])
        # info message, so messageReceived(bool) should not be emitted
        self.assertEqual(len(app_spy_received), 0)
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(app_spy), 2)
        self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Warning])
        # warning message, so messageReceived(bool) should be emitted
        self.assertEqual(len(app_spy_received), 1)
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=False)
        self.assertEqual(len(app_spy), 3)
        # notifyUser was False
        self.assertEqual(len(app_spy_received), 1)
    def testBlocker(self):
        app_log = QgsApplication.messageLog()
        spy = QSignalSpy(app_log.messageReceived)
        spy_received = QSignalSpy(app_log.messageReceived[bool])
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[-1], ['test', 'tag', Qgis.Warning])
        self.assertEqual(len(spy_received), 1)
        # block notifications
        b = QgsMessageLogNotifyBlocker()
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(spy), 2)  # should not be blocked
        self.assertEqual(len(spy_received), 1)  # should be blocked
        # another blocker
        b2 = QgsMessageLogNotifyBlocker()
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(spy), 3)  # should not be blocked
        self.assertEqual(len(spy_received), 1)  # should be blocked
        del b
        # still blocked because of b2
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(spy), 4)  # should not be blocked
        self.assertEqual(len(spy_received), 1)  # should be blocked
        del b2
        # not blocked
        QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
        self.assertEqual(len(spy), 5)  # should not be blocked
        self.assertEqual(len(spy_received), 2)  # should not be blocked
if __name__ == '__main__':
    unittest.main()
 | 
	gpl-2.0 | -103,974,553,227,338,910 | 36.48913 | 79 | 0.655552 | false | 
| 
	hophacker/bitcoin_malleability | 
	contrib/bitrpc/bitrpc.py | 
	46 | 
	9207 | 
	from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Bitcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Bitcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
    try:
        data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
    except:
        print "\n---An error occurred---\n"
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
        pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported"
 | 
	mit | 148,013,364,626,363,460 | 27.329231 | 80 | 0.568481 | false | 
| 
	tinloaf/home-assistant | 
	homeassistant/components/microsoft_face.py | 
	4 | 
	10344 | 
	"""
Support for Microsoft face recognition.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/microsoft_face/
"""
import asyncio
import json
import logging
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_TIMEOUT, ATTR_NAME
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
ATTR_CAMERA_ENTITY = 'camera_entity'
ATTR_GROUP = 'group'
ATTR_PERSON = 'person'
CONF_AZURE_REGION = 'azure_region'
DATA_MICROSOFT_FACE = 'microsoft_face'
DEFAULT_TIMEOUT = 10
DEPENDENCIES = ['camera']
DOMAIN = 'microsoft_face'
FACE_API_URL = "api.cognitive.microsoft.com/face/v1.0/{0}"
SERVICE_CREATE_GROUP = 'create_group'
SERVICE_CREATE_PERSON = 'create_person'
SERVICE_DELETE_GROUP = 'delete_group'
SERVICE_DELETE_PERSON = 'delete_person'
SERVICE_FACE_PERSON = 'face_person'
SERVICE_TRAIN_GROUP = 'train_group'
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_AZURE_REGION, default="westus"): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    }),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_GROUP_SERVICE = vol.Schema({
    vol.Required(ATTR_NAME): cv.string,
})
SCHEMA_PERSON_SERVICE = SCHEMA_GROUP_SERVICE.extend({
    vol.Required(ATTR_GROUP): cv.slugify,
})
SCHEMA_FACE_SERVICE = vol.Schema({
    vol.Required(ATTR_PERSON): cv.string,
    vol.Required(ATTR_GROUP): cv.slugify,
    vol.Required(ATTR_CAMERA_ENTITY): cv.entity_id,
})
SCHEMA_TRAIN_SERVICE = vol.Schema({
    vol.Required(ATTR_GROUP): cv.slugify,
})
async def async_setup(hass, config):
    """Set up Microsoft Face."""
    entities = {}
    face = MicrosoftFace(
        hass,
        config[DOMAIN].get(CONF_AZURE_REGION),
        config[DOMAIN].get(CONF_API_KEY),
        config[DOMAIN].get(CONF_TIMEOUT),
        entities
    )
    try:
        # read existing groups/persons from the cloud and create entities
        await face.update_store()
    except HomeAssistantError as err:
        _LOGGER.error("Can't load data from face api: %s", err)
        return False
    hass.data[DATA_MICROSOFT_FACE] = face
    async def async_create_group(service):
        """Create a new person group."""
        name = service.data[ATTR_NAME]
        g_id = slugify(name)
        try:
            await face.call_api(
                'put', "persongroups/{0}".format(g_id), {'name': name})
            face.store[g_id] = {}
            entities[g_id] = MicrosoftFaceGroupEntity(hass, face, g_id, name)
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create group '%s' with error: %s", g_id, err)
    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_GROUP, async_create_group,
        schema=SCHEMA_GROUP_SERVICE)
    async def async_delete_group(service):
        """Delete a person group."""
        g_id = slugify(service.data[ATTR_NAME])
        try:
            await face.call_api('delete', "persongroups/{0}".format(g_id))
            face.store.pop(g_id)
            entity = entities.pop(g_id)
            hass.states.async_remove(entity.entity_id)
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete group '%s' with error: %s", g_id, err)
    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_GROUP, async_delete_group,
        schema=SCHEMA_GROUP_SERVICE)
    async def async_train_group(service):
        """Train a person group."""
        g_id = service.data[ATTR_GROUP]
        try:
            await face.call_api(
                'post', "persongroups/{0}/train".format(g_id))
        except HomeAssistantError as err:
            _LOGGER.error("Can't train group '%s' with error: %s", g_id, err)
    hass.services.async_register(
        DOMAIN, SERVICE_TRAIN_GROUP, async_train_group,
        schema=SCHEMA_TRAIN_SERVICE)
    async def async_create_person(service):
        """Create a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        try:
            user_data = await face.call_api(
                'post', "persongroups/{0}/persons".format(g_id), {'name': name}
            )
            face.store[g_id][name] = user_data['personId']
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create person '%s' with error: %s", name, err)
    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_PERSON, async_create_person,
        schema=SCHEMA_PERSON_SERVICE)
    async def async_delete_person(service):
        """Delete a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(name)
        try:
            await face.call_api(
                'delete', "persongroups/{0}/persons/{1}".format(g_id, p_id))
            face.store[g_id].pop(name)
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err)
    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_PERSON, async_delete_person,
        schema=SCHEMA_PERSON_SERVICE)
    async def async_face_person(service):
        """Add a new face picture to a person."""
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(service.data[ATTR_PERSON])
        camera_entity = service.data[ATTR_CAMERA_ENTITY]
        camera = hass.components.camera
        try:
            image = await camera.async_get_image(hass, camera_entity)
            await face.call_api(
                'post',
                "persongroups/{0}/persons/{1}/persistedFaces".format(
                    g_id, p_id),
                image.content,
                binary=True
            )
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err)
    hass.services.async_register(
        DOMAIN, SERVICE_FACE_PERSON, async_face_person,
        schema=SCHEMA_FACE_SERVICE)
    return True
class MicrosoftFaceGroupEntity(Entity):
    """Person-Group state/data Entity."""
    def __init__(self, hass, api, g_id, name):
        """Initialize person/group entity."""
        self.hass = hass
        self._api = api
        self._id = g_id
        self._name = name
    @property
    def name(self):
        """Return the name of the entity."""
        return self._name
    @property
    def entity_id(self):
        """Return entity id."""
        return "{0}.{1}".format(DOMAIN, self._id)
    @property
    def state(self):
        """Return the state of the entity."""
        return len(self._api.store[self._id])
    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        return False
    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attr = {}
        for name, p_id in self._api.store[self._id].items():
            attr[name] = p_id
        return attr
class MicrosoftFace:
    """Microsoft Face api for HomeAssistant."""
    def __init__(self, hass, server_loc, api_key, timeout, entities):
        """Initialize Microsoft Face api."""
        self.hass = hass
        self.websession = async_get_clientsession(hass)
        self.timeout = timeout
        self._api_key = api_key
        self._server_url = "https://{0}.{1}".format(server_loc, FACE_API_URL)
        self._store = {}
        self._entities = entities
    @property
    def store(self):
        """Store group/person data and IDs."""
        return self._store
    async def update_store(self):
        """Load all group/person data into local store."""
        groups = await self.call_api('get', 'persongroups')
        tasks = []
        for group in groups:
            g_id = group['personGroupId']
            self._store[g_id] = {}
            self._entities[g_id] = MicrosoftFaceGroupEntity(
                self.hass, self, g_id, group['name'])
            persons = await self.call_api(
                'get', "persongroups/{0}/persons".format(g_id))
            for person in persons:
                self._store[g_id][person['name']] = person['personId']
            tasks.append(self._entities[g_id].async_update_ha_state())
        if tasks:
            await asyncio.wait(tasks, loop=self.hass.loop)
    async def call_api(self, method, function, data=None, binary=False,
                       params=None):
        """Make an api call."""
        headers = {"Ocp-Apim-Subscription-Key": self._api_key}
        url = self._server_url.format(function)
        payload = None
        if binary:
            headers[CONTENT_TYPE] = "application/octet-stream"
            payload = data
        else:
            headers[CONTENT_TYPE] = "application/json"
            if data is not None:
                payload = json.dumps(data).encode()
            else:
                payload = None
        try:
            with async_timeout.timeout(self.timeout, loop=self.hass.loop):
                response = await getattr(self.websession, method)(
                    url, data=payload, headers=headers, params=params)
                answer = await response.json()
            _LOGGER.debug("Read from microsoft face api: %s", answer)
            if response.status < 300:
                return answer
            _LOGGER.warning("Error %d microsoft face api %s",
                            response.status, response.url)
            raise HomeAssistantError(answer['error']['message'])
        except aiohttp.ClientError:
            _LOGGER.warning("Can't connect to microsoft face api")
        except asyncio.TimeoutError:
            _LOGGER.warning("Timeout from microsoft face api %s", response.url)
        raise HomeAssistantError("Network error on microsoft face api.")
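# Illustrative configuration sketch (added for clarity; not part of the
# original module). The component is enabled from configuration.yaml, with
# placeholder values shown here:
#
#     microsoft_face:
#       api_key: YOUR_API_KEY
#       azure_region: westeurope
#       timeout: 10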
 | 
	apache-2.0 | 8,826,351,870,563,985,000 | 30.730061 | 79 | 0.604215 | false | 
| 
	FNCS/ns-3.26 | 
	examples/wireless/mixed-wireless.py | 
	59 | 
	17198 | 
	# /*
#  * This program is free software; you can redistribute it and/or modify
#  * it under the terms of the GNU General Public License version 2 as
#  * published by the Free Software Foundation;
#  *
#  * This program is distributed in the hope that it will be useful,
#  * but WITHOUT ANY WARRANTY; without even the implied warranty of
#  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  * GNU General Public License for more details.
#  *
#  * You should have received a copy of the GNU General Public License
#  * along with this program; if not, write to the Free Software
#  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#  *
#  */
# 
#  This ns-3 example demonstrates the use of helper functions to ease 
#  the construction of simulation scenarios.  
#  
#  The simulation topology consists of a mixed wired and wireless
#  scenario in which a hierarchical mobility model is used.
# 
#  The simulation layout consists of N backbone routers interconnected
#  by an ad hoc wifi network.
#  Each backbone router also has a local 802.11 network and is connected
#  to a local LAN.  An additional set of (K-1) nodes is connected to
#  this backbone.  Finally, a local LAN is connected to each router
#  on the backbone, with L-1 additional hosts.  
# 
#  The nodes are populated with TCP/IP stacks, and OLSR unicast routing
#  on the backbone.  An example UDP transfer is shown.  The simulator can
#  be configured to output tcpdumps or traces from different nodes.
# 
# 
#           +--------------------------------------------------------+
#           |                                                        |
#           |              802.11 ad hoc, ns-2 mobility              | 
#           |                                                        |
#           +--------------------------------------------------------+
#                    |       o o o(N backbone routers)       |
#                +--------+                               +--------+
#      wired LAN | mobile |                     wired LAN | mobile |
#     -----------| router |                    -----------| router |
#                ---------                                ---------
#                    |                                        |
#           +----------------+                       +----------------+
#           |     802.11     |                       |     802.11     |
#           |      net       |                       |       net      |
#           |   K-1 hosts    |                       |   K-1 hosts    |
#           +----------------+                       +----------------+
# 
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
# # 
# #  This function will be used below as a trace sink
# #  
# static void
# CourseChangeCallback(std.string path, Ptr<const MobilityModel> model)
# {
#   Vector position = model.GetPosition();
#   std.cout << "CourseChange " << path << " x=" << position.x << ", y=" << position.y << ", z=" << position.z << std.endl;
# }
def main(argv): 
    # 
    #  First, we initialize a few local variables that control some 
    #  simulation parameters.
    #
    cmd = ns.core.CommandLine()
    cmd.backboneNodes = 10
    cmd.infraNodes = 2
    cmd.lanNodes = 2
    cmd.stopTime = 20
    # 
    #  Simulation defaults are typically set next, before command line
    #  arguments are parsed.
    # 
    ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.StringValue("1472"))
    ns.core.Config.SetDefault("ns3::OnOffApplication::DataRate", ns.core.StringValue("100kb/s"))
    # 
    #  For convenience, we add the local variables to the command line argument
    #  system so that they can be overridden with flags such as 
    #  "--backboneNodes=20"
    # 
    
    cmd.AddValue("backboneNodes", "number of backbone nodes")
    cmd.AddValue("infraNodes", "number of leaf nodes")
    cmd.AddValue("lanNodes", "number of LAN nodes")
    cmd.AddValue("stopTime", "simulation stop time(seconds)")
    
    # 
    #  The system global variables and the local values added to the argument
    #  system can be overridden by command line arguments by using this call.
    # 
    cmd.Parse(argv)
    backboneNodes = int(cmd.backboneNodes)
    infraNodes = int(cmd.infraNodes) 
    lanNodes = int(cmd.lanNodes)
    stopTime = int(cmd.stopTime)
    if (stopTime < 10):
        print "Use a simulation stop time >= 10 seconds"
        exit(1)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #                                                                        # 
    #  Construct the backbone                                                # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    # 
    #  Create a container to manage the nodes of the adhoc(backbone) network.
    #  Later we'll create the rest of the nodes we'll need.
    # 
    backbone = ns.network.NodeContainer()
    backbone.Create(backboneNodes)
    # 
    #  Create the backbone wifi net devices and install them into the nodes in 
    #  our container
    # 
    wifi = ns.wifi.WifiHelper()
    mac = ns.wifi.WifiMacHelper()
    mac.SetType("ns3::AdhocWifiMac")
    wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
                                  "DataMode", ns.core.StringValue("OfdmRate54Mbps"))
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    backboneDevices = wifi.Install(wifiPhy, mac, backbone)
    # 
    #  Add the IPv4 protocol stack to the nodes in our container
    # 
    print "Enabling OLSR routing on all backbone nodes"
    internet = ns.internet.InternetStackHelper()
    olsr = ns.olsr.OlsrHelper()
    internet.SetRoutingHelper(olsr); # has effect on the next Install ()
    internet.Install(backbone);
    # re-initialize for non-olsr routing.
    # internet.Reset()
    # 
    #  Assign IPv4 addresses to the device drivers(actually to the associated
    #  IPv4 interfaces) we just created.
    # 
    ipAddrs = ns.internet.Ipv4AddressHelper()
    ipAddrs.SetBase(ns.network.Ipv4Address("192.168.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    ipAddrs.Assign(backboneDevices)
    # 
    #  The ad-hoc network nodes need a mobility model so we aggregate one to 
    #  each of the nodes we just finished building.  
    # 
    mobility = ns.mobility.MobilityHelper()
    mobility.SetPositionAllocator("ns3::GridPositionAllocator",
                                  "MinX", ns.core.DoubleValue(20.0),
                                  "MinY", ns.core.DoubleValue(20.0),
                                  "DeltaX", ns.core.DoubleValue(20.0),
                                  "DeltaY", ns.core.DoubleValue(20.0),
                                  "GridWidth", ns.core.UintegerValue(5),
                                  "LayoutType", ns.core.StringValue("RowFirst"))
    mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
                               "Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-500, 500, -500, 500)),
                               "Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=2]"),
                               "Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.2]"))
    mobility.Install(backbone)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #                                                                        # 
    #  Construct the LANs                                                    # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #  Reset the address base-- all of the CSMA networks will be in
    #  the "172.16 address space
    ipAddrs.SetBase(ns.network.Ipv4Address("172.16.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    for i in range(backboneNodes):
        print "Configuring local area network for backbone node ", i
        # 
        #  Create a container to manage the nodes of the LAN.  We need
        #  two containers here; one with all of the new nodes, and one
        #  with all of the nodes including new and existing nodes
        # 
        newLanNodes = ns.network.NodeContainer()
        newLanNodes.Create(lanNodes - 1)
        #  Now, create the container with all nodes on this link
        lan = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), newLanNodes)
        # 
        #  Create the CSMA net devices and install them into the nodes in our 
        #  collection.
        # 
        csma = ns.csma.CsmaHelper()
        csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
        csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)))
        lanDevices = csma.Install(lan)
        # 
        #  Add the IPv4 protocol stack to the new LAN nodes
        # 
        internet.Install(newLanNodes)
        # 
        #  Assign IPv4 addresses to the device drivers(actually to the 
        #  associated IPv4 interfaces) we just created.
        # 
        ipAddrs.Assign(lanDevices)
        # 
        #  Assign a new network prefix for the next LAN, according to the
        #  network mask initialized above
        # 
        ipAddrs.NewNetwork()
        #
        # The new LAN nodes need a mobility model so we aggregate one
        # to each of the nodes we just finished building.
        #
        mobilityLan = ns.mobility.MobilityHelper() 
        positionAlloc = ns.mobility.ListPositionAllocator()
        for j in range(newLanNodes.GetN()):
            positionAlloc.Add(ns.core.Vector(0.0, (j*10 + 10), 0.0))
        mobilityLan.SetPositionAllocator(positionAlloc)
        mobilityLan.PushReferenceMobilityModel(backbone.Get(i))
        mobilityLan.SetMobilityModel("ns3::ConstantPositionMobilityModel")
        mobilityLan.Install(newLanNodes);
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #                                                                        # 
    #  Construct the mobile networks                                         # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #  Reset the address base-- all of the 802.11 networks will be in
    #  the "10.0" address space
    ipAddrs.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    for i in range(backboneNodes):
        print "Configuring wireless network for backbone node ", i
        # 
        #  Create a container to manage the nodes of the LAN.  We need
        #  two containers here; one with all of the new nodes, and one
        #  with all of the nodes including new and existing nodes
        # 
        stas = ns.network.NodeContainer()
        stas.Create(infraNodes - 1)
        #  Now, create the container with all nodes on this link
        infra = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), stas)
        # 
        #  Create another ad hoc network and devices
        # 
        ssid = ns.wifi.Ssid('wifi-infra' + str(i))
        wifiInfra = ns.wifi.WifiHelper.Default()
        wifiPhy.SetChannel(wifiChannel.Create())
        wifiInfra.SetRemoteStationManager('ns3::ArfWifiManager')
        macInfra = ns.wifi.WifiMacHelper();
        macInfra.SetType("ns3::StaWifiMac",
                         "Ssid", ns.wifi.SsidValue(ssid),
                         "ActiveProbing", ns.core.BooleanValue(False))
        # setup stas
        staDevices = wifiInfra.Install(wifiPhy, macInfra, stas)
        # setup ap.
        macInfra.SetType("ns3::ApWifiMac",
                         "Ssid", ns.wifi.SsidValue(ssid),
                         "BeaconGeneration", ns.core.BooleanValue(True),
                         "BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5)))
        apDevices = wifiInfra.Install(wifiPhy, macInfra, backbone.Get(i))
        # Collect all of these new devices
        infraDevices = ns.network.NetDeviceContainer(apDevices, staDevices)
        #  Add the IPv4 protocol stack to the nodes in our container
        # 
        internet.Install(stas)
        # 
        #  Assign IPv4 addresses to the device drivers(actually to the associated
        #  IPv4 interfaces) we just created.
        # 
        ipAddrs.Assign(infraDevices)
        # 
        #  Assign a new network prefix for each mobile network, according to 
        #  the network mask initialized above
        # 
        ipAddrs.NewNetwork()
        # 
        #  The new wireless nodes need a mobility model so we aggregate one 
        #  to each of the nodes we just finished building.
        # 
        subnetAlloc = ns.mobility.ListPositionAllocator()
        for j in range(infra.GetN()):
            subnetAlloc.Add(ns.core.Vector(0.0, j, 0.0))
        mobility.PushReferenceMobilityModel(backbone.Get(i))
        mobility.SetPositionAllocator(subnetAlloc)
        mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
                                  "Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-10, 10, -10, 10)),
                                  "Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=3]"),
                                  "Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.4]"))
        mobility.Install(stas)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #                                                                        # 
    #  Application configuration                                             # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #  Create the OnOff application to send UDP datagrams of size
    #  210 bytes at a rate of 448 Kb/s, between two nodes
    print "Create Applications."
    port = 9   #  Discard port(RFC 863)
    appSource = ns.network.NodeList.GetNode(backboneNodes)
    lastNodeIndex = backboneNodes + backboneNodes*(lanNodes - 1) + backboneNodes*(infraNodes - 1) - 1
    appSink = ns.network.NodeList.GetNode(lastNodeIndex)
    # Let's fetch the IP address of the last node, which is on Ipv4Interface 1
    remoteAddr = appSink.GetObject(ns.internet.Ipv4.GetTypeId()).GetAddress(1,0).GetLocal()
    onoff = ns.applications.OnOffHelper("ns3::UdpSocketFactory", 
                            ns.network.Address(ns.network.InetSocketAddress(remoteAddr, port)))
    apps = onoff.Install(ns.network.NodeContainer(appSource))
    apps.Start(ns.core.Seconds(3))
    apps.Stop(ns.core.Seconds(stopTime - 1))
    #  Create a packet sink to receive these packets
    sink = ns.applications.PacketSinkHelper("ns3::UdpSocketFactory", 
                                ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port))
    apps = sink.Install(ns.network.NodeContainer(appSink))
    apps.Start(ns.core.Seconds(3))
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    #                                                                        # 
    #  Tracing configuration                                                 # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / 
    print "Configure Tracing."
    csma = ns.csma.CsmaHelper()
    # 
    #  Let's set up some ns-2-like ascii traces, using another helper class
    # 
    ascii = ns.network.AsciiTraceHelper();
    stream = ascii.CreateFileStream("mixed-wireless.tr");
    wifiPhy.EnableAsciiAll(stream);
    csma.EnableAsciiAll(stream);
    internet.EnableAsciiIpv4All(stream);
    #  Csma captures in non-promiscuous mode
    csma.EnablePcapAll("mixed-wireless", False)
    #  Let's do a pcap trace on the backbone devices
    wifiPhy.EnablePcap("mixed-wireless", backboneDevices)
    wifiPhy.EnablePcap("mixed-wireless", appSink.GetId(), 0)
#   #ifdef ENABLE_FOR_TRACING_EXAMPLE
#     Config.Connect("/NodeList/*/$MobilityModel/CourseChange",
#       MakeCallback(&CourseChangeCallback))
#   #endif
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #  
    #                                                                        # 
    #  Run simulation                                                        # 
    #                                                                        # 
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #  
    print "Run Simulation."
    ns.core.Simulator.Stop(ns.core.Seconds(stopTime))
    ns.core.Simulator.Run()
    ns.core.Simulator.Destroy()
if __name__ == '__main__':
    import sys
    main(sys.argv)
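# Illustrative invocation (added for clarity; not part of the original
# example). With a Python-enabled ns-3 build, the defaults set above can be
# overridden from the command line, e.g.:
#
#     ./waf --pyrun "examples/wireless/mixed-wireless.py --backboneNodes=20 --stopTime=30"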
 | 
	gpl-2.0 | -2,390,654,260,867,613,000 | 44.020942 | 123 | 0.528434 | false | 
| 
	Monika319/EWEF-1 | 
	Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py | 
	1 | 
	1312 | 
	# -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_05f_p2.txt"]
for NazwaPliku in files:
    print NazwaPliku
    Plik=open(NazwaPliku)
    #print DeltaT
    Dane=Plik.readlines()#[4:]
    DeltaT=float(Dane[2].split()[3].replace(",","."))
    #M=len(Dane[4].split())/2
    M=2
    Dane=Dane[5:]
    Plik.close()
    print M
    Ys=[np.zeros(len(Dane)) for i in range(M)]
    for m in range(M):
        for i in range(len(Dane)):
            try:
                Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
            except:
                print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
        #print i, Y[i]
    X=np.zeros_like(Ys[0])
    for i in range(len(X)):
        X[i]=i*DeltaT
    for y in Ys:
        print max(y)-min(y)
    Opis=u"Układ szeregowy\nPołowa częstotliwości rezonansowej"  # "Series circuit / Half the resonant frequency"
    Nazwa=u"Z5W2"
    plt.title(u"Przebieg napięciowy\n"+Opis)  # "Voltage waveform"
    plt.xlabel(u"Czas t [s]")  # "Time t [s]"
    plt.ylabel(u"Napięcie [V]")  # "Voltage [V]"
    plt.plot(X,Ys[0],label=u"Wejście")  # "Input"
    plt.plot(X,Ys[1],label=u"Wyjście")  # "Output"
    plt.grid()
    plt.legend(loc="best")
    plt.savefig(Nazwa + ".png", bbox_inches='tight')
    plt.show()
 | 
	gpl-2.0 | -1,034,745,225,697,455,700 | 23.603774 | 72 | 0.578221 | false | 
| 
	catapult-project/catapult | 
	telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py | 
	3 | 
	21781 | 
	# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
import re
import socket
import sys
import six
from py_utils import exc_util
from py_utils import retry_util
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome_inspector import devtools_http
from telemetry.internal.backends.chrome_inspector import inspector_backend
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import memory_backend
from telemetry.internal.backends.chrome_inspector import system_info_backend
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.internal.backends.chrome_inspector import window_manager_backend
from telemetry.internal.platform.tracing_agent import (
    chrome_tracing_devtools_manager)
class TabNotFoundError(exceptions.Error):
  pass
class UnsupportedVersionError(exceptions.Error):
  pass
# Only versions of Chrome from M58 and above are supported. Older versions
# did not support many of the modern features currently in use by Telemetry.
MIN_SUPPORTED_BRANCH_NUMBER = 3029
# The first WebSocket connections or calls against a newly-started
# browser, specifically in Debug builds, can take a long time. Give
# them 60s to complete instead of the default 10s used in many places
# in this file.
_FIRST_CALL_TIMEOUT = 60
# These are possible exceptions raised when the DevTools agent is not ready
# to accept incoming connections.
_DEVTOOLS_CONNECTION_ERRORS = (
    devtools_http.DevToolsClientConnectionError,
    inspector_websocket.WebSocketException,
    socket.error)
def GetDevToolsBackEndIfReady(devtools_port, app_backend, browser_target=None, enable_tracing=True):
  client = _DevToolsClientBackend(app_backend)
  try:
    client.Connect(devtools_port, browser_target, enable_tracing)
    logging.info('DevTools agent ready at %s', client)
  except _DEVTOOLS_CONNECTION_ERRORS as exc:
    logging.info('DevTools agent at %s not ready yet: %s', client, exc)
    client = None
  return client
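# Illustrative polling pattern (not part of this module; `app_backend` and
# `port` are assumed to come from the browser launch code):
#
#   client = None
#   while client is None:
#     client = GetDevToolsBackEndIfReady(port, app_backend)
#     time.sleep(0.1)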
class FuchsiaBrowserTargetNotFoundException(Exception):
  pass
class _DevToolsClientBackend(object):
  """An object that communicates with Chrome's devtools.
  This class owns a map of InspectorBackends. It is responsible for creating
  and destroying them.
  """
  def __init__(self, app_backend):
    """Create an object able to connect with the DevTools agent.
    Args:
      app_backend: The app that contains the DevTools agent.
    """
    self._app_backend = app_backend
    self._browser_target = None
    self._forwarder = None
    self._devtools_http = None
    self._browser_websocket = None
    self._created = False
    self._local_port = None
    self._remote_port = None
    # Other backends.
    self._tracing_backend = None
    self._memory_backend = None
    self._system_info_backend = None
    self._wm_backend = None
    self._devtools_context_map_backend = _DevToolsContextMapBackend(self)
  def __str__(self):
    s = self.browser_target_url
    if self.local_port != self.remote_port:
      s = '%s (remote=%s)' % (s, self.remote_port)
    return s
  @property
  def local_port(self):
    return self._local_port
  @property
  def remote_port(self):
    return self._remote_port
  @property
  def browser_target_url(self):
    # For Fuchsia browsers, we get the browser_target through a JSON request
    if self.platform_backend.GetOSName() == 'fuchsia':
      resp = self.GetVersion()
      if 'webSocketDebuggerUrl' in resp:
        return resp['webSocketDebuggerUrl']
      else:
        raise FuchsiaBrowserTargetNotFoundException(
            'Could not get the browser target.')
    return 'ws://127.0.0.1:%i%s' % (self._local_port, self._browser_target)
  @property
  def app_backend(self):
    return self._app_backend
  @property
  def platform_backend(self):
    return self._app_backend.platform_backend
  @property
  def supports_overriding_memory_pressure_notifications(self):
    return (
        isinstance(self.app_backend, browser_backend.BrowserBackend)
        and self.app_backend.supports_overriding_memory_pressure_notifications)
  @property
  def is_tracing_running(self):
    return self._tracing_backend.is_tracing_running
  @property
  def has_tracing_client(self):
    return self._tracing_backend is not None
  def Connect(self, devtools_port, browser_target, enable_tracing=True):
    try:
      self._Connect(devtools_port, browser_target, enable_tracing)
    except:
      self.Close()  # Close any connections made if failed to connect to all.
      raise
  @retry_util.RetryOnException(devtools_http.DevToolsClientUrlError, retries=3)
  def _WaitForConnection(self, retries=None):
    del retries
    self._devtools_http.Request('')
  def _SetUpPortForwarding(self, devtools_port):
    self._forwarder = self.platform_backend.forwarder_factory.Create(
        local_port=None,  # Forwarder will choose an available port.
        remote_port=devtools_port, reverse=True)
    self._local_port = self._forwarder._local_port
    self._remote_port = self._forwarder._remote_port
    self._devtools_http = devtools_http.DevToolsHttp(self.local_port)
    # For Fuchsia, wait until port forwarding has started working.
    if self.platform_backend.GetOSName() == 'fuchsia':
      self._WaitForConnection()
  def _Connect(self, devtools_port, browser_target, enable_tracing):
    """Attempt to connect to the DevTools client.
    Args:
      devtools_port: The devtools_port uniquely identifies the DevTools agent.
      browser_target: An optional string to override the default path used to
        establish a websocket connection with the browser inspector.
      enable_tracing: Whether a tracing client is created.
    Raises:
      Any of _DEVTOOLS_CONNECTION_ERRORS if failed to establish the connection.
    """
    self._browser_target = browser_target or '/devtools/browser'
    self._SetUpPortForwarding(devtools_port)
    # If the agent is not alive and ready, trying to get the branch number will
    # raise a devtools_http.DevToolsClientConnectionError.
    branch_number = self.GetChromeBranchNumber()
    if branch_number < MIN_SUPPORTED_BRANCH_NUMBER:
      raise UnsupportedVersionError(
          'Chrome branch number %d is no longer supported' % branch_number)
    # Ensure that the inspector websocket is ready. This may raise a
    # inspector_websocket.WebSocketException or socket.error if not ready.
    self._browser_websocket = inspector_websocket.InspectorWebsocket()
    self._browser_websocket.Connect(self.browser_target_url, timeout=10)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(self)
    # If there is a trace_config it means that Telemetry has already started
    # Chrome tracing via a startup config. The TracingBackend also needs
    # this config to initialize itself correctly.
    if enable_tracing:
      trace_config = (
          self.platform_backend.tracing_controller_backend.GetChromeTraceConfig())
      self._tracing_backend = tracing_backend.TracingBackend(
          self._browser_websocket, trace_config)
  @exc_util.BestEffort
  def Close(self):
    if self._tracing_backend is not None:
      self._tracing_backend.Close()
      self._tracing_backend = None
    if self._memory_backend is not None:
      self._memory_backend.Close()
      self._memory_backend = None
    if self._system_info_backend is not None:
      self._system_info_backend.Close()
      self._system_info_backend = None
    if self._wm_backend is not None:
      self._wm_backend.Close()
      self._wm_backend = None
    if self._devtools_context_map_backend is not None:
      self._devtools_context_map_backend.Clear()
      self._devtools_context_map_backend = None
    # Close the DevTools connections last (in case the backends above still
    # need to interact with them while closing).
    if self._browser_websocket is not None:
      self._browser_websocket.Disconnect()
      self._browser_websocket = None
    if self._devtools_http is not None:
      self._devtools_http.Disconnect()
      self._devtools_http = None
    if self._forwarder is not None:
      self._forwarder.Close()
      self._forwarder = None
  def CloseBrowser(self):
    """Close the browser instance."""
    request = {
        'method': 'Browser.close',
    }
    self._browser_websocket.SyncRequest(request, timeout=60)
  def IsAlive(self):
    """Whether the DevTools server is available and connectable."""
    if self._devtools_http is None:
      return False
    try:
      self._devtools_http.Request('')
    except devtools_http.DevToolsClientConnectionError:
      return False
    else:
      return True
  @decorators.Cache
  def GetVersion(self):
    """Return the version dict as provided by the DevTools agent."""
    return self._devtools_http.RequestJson('version')
  def GetChromeBranchNumber(self):
    # Detect version information.
    resp = self.GetVersion()
    branch_number_match = None
    if 'Protocol-Version' in resp:
      if 'Browser' in resp:
        branch_number_match = re.search(r'.+/\d+\.\d+\.(\d+)\.\d+',
                                        resp['Browser'])
      if not branch_number_match and 'User-Agent' in resp:
        branch_number_match = re.search(
            r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
            resp['User-Agent'])
      if branch_number_match:
        branch_number = int(branch_number_match.group(1))
        if branch_number:
          return branch_number
    # Branch number can't be determined, so fail any branch number checks.
    return 0
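  # For example, a 'Browser' value such as 'Chrome/61.0.3163.79', or a
  # 'User-Agent' containing 'Chrome/61.0.3163.79 Mobile Safari', would both
  # yield branch number 3163 via the regexes above (illustrative values).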
  def _ListInspectableContexts(self):
    return self._devtools_http.RequestJson('')
  def RequestNewTab(self, timeout, in_new_window=False, url=None):
    """Creates a new tab, either in new window or current window.
    Returns:
      A dict of a parsed JSON object as returned by DevTools. Example:
      If an error is present, the dict will contain an 'error' key.
      If no error is present, the result is present in the 'result' key:
      {
        "result": {
          "targetId": "id-string"  # This is the ID for the tab.
        }
      }
    """
    request = {
        'method': 'Target.createTarget',
        'params': {
            'url': url if url else 'about:blank',
            'newWindow': in_new_window
        }
    }
    return self._browser_websocket.SyncRequest(request, timeout)
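  # Illustrative caller-side handling (hypothetical code), based on the
  # response shape documented above:
  #
  #   resp = client.RequestNewTab(timeout=60, url='about:blank')
  #   if 'error' not in resp:
  #     tab_id = resp['result']['targetId']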
  def CloseTab(self, tab_id, timeout):
    """Closes the tab with the given id.
    Raises:
      devtools_http.DevToolsClientConnectionError
      TabNotFoundError
    """
    try:
      return self._devtools_http.Request(
          'close/%s' % tab_id, timeout=timeout)
    except devtools_http.DevToolsClientUrlError:
      error = TabNotFoundError(
          'Unable to close tab, tab id not found: %s' % tab_id)
      six.reraise(TabNotFoundError, error, sys.exc_info()[2])
  def ActivateTab(self, tab_id, timeout):
    """Activates the tab with the given id.
    Raises:
      devtools_http.DevToolsClientConnectionError
      TabNotFoundError
    """
    try:
      return self._devtools_http.Request(
          'activate/%s' % tab_id, timeout=timeout)
    except devtools_http.DevToolsClientUrlError:
      error = TabNotFoundError(
          'Unable to activate tab, tab id not found: %s' % tab_id)
      six.reraise(TabNotFoundError, error, sys.exc_info()[2])
  def GetUrl(self, tab_id):
    """Returns the URL of the tab with |tab_id|, as reported by devtools.
    Raises:
      devtools_http.DevToolsClientConnectionError
    """
    for c in self._ListInspectableContexts():
      if c['id'] == tab_id:
        return c['url']
    return None
  def IsInspectable(self, tab_id):
    """Whether the tab with |tab_id| is inspectable, as reported by devtools.
    Raises:
      devtools_http.DevToolsClientConnectionError
    """
    contexts = self._ListInspectableContexts()
    return tab_id in [c['id'] for c in contexts]
  def GetUpdatedInspectableContexts(self):
    """Returns an updated instance of _DevToolsContextMapBackend."""
    contexts = self._ListInspectableContexts()
    self._devtools_context_map_backend._Update(contexts)
    return self._devtools_context_map_backend
  def _CreateWindowManagerBackendIfNeeded(self):
    if not self._wm_backend:
      self._wm_backend = window_manager_backend.WindowManagerBackend(
          self._browser_websocket)
  def _CreateMemoryBackendIfNeeded(self):
    assert self.supports_overriding_memory_pressure_notifications
    if not self._memory_backend:
      self._memory_backend = memory_backend.MemoryBackend(
          self._browser_websocket)
  def _CreateSystemInfoBackendIfNeeded(self):
    if not self._system_info_backend:
      self._system_info_backend = system_info_backend.SystemInfoBackend(
          self.browser_target_url)
  def StartChromeTracing(self, trace_config, transfer_mode=None, timeout=20):
    """
    Args:
        trace_config: A tracing_config.TracingConfig instance.
        transfer_mode: Defaults to using 'ReturnAsStream' transfer mode
          for Chrome tracing. Can be set to 'ReportEvents'.
        timeout: Time waited for websocket to receive a response.
    """
    if not self._tracing_backend:
      return
    assert trace_config and trace_config.enable_chrome_trace
    return self._tracing_backend.StartTracing(
        trace_config.chrome_trace_config, transfer_mode, timeout)
  def RecordChromeClockSyncMarker(self, sync_id):
    assert self.is_tracing_running, 'Tracing must be running to clock sync.'
    self._tracing_backend.RecordClockSyncMarker(sync_id)
  def StopChromeTracing(self):
    if not self._tracing_backend:
      return
    assert self.is_tracing_running
    try:
      backend = self.FirstTabBackend()
      if backend is not None:
        backend.AddTimelineMarker('first-renderer-thread')
        backend.AddTimelineMarker(backend.id)
      else:
        logging.warning('No page inspector backend found.')
    finally:
      self._tracing_backend.StopTracing()
  def _IterInspectorBackends(self, types):
    """Iterate over inspector backends from this client.
    Note: The devtools client might list contexts which, however, do not yet
    have a live DevTools instance to connect to (e.g. background tabs which may
    have been discarded or not yet created). In such a case this method will hang
    and eventually timeout when trying to create an inspector backend to
    communicate with such contexts.
    """
    context_map = self.GetUpdatedInspectableContexts()
    for context in context_map.contexts:
      if context['type'] in types:
        yield context_map.GetInspectorBackend(context['id'])
  def FirstTabBackend(self):
    """Obtain the inspector backend for the firstly created tab."""
    return next(self._IterInspectorBackends(['page']), None)
  def CollectChromeTracingData(self, trace_data_builder, timeout=120):
    if not self._tracing_backend:
      return
    self._tracing_backend.CollectTraceData(trace_data_builder, timeout)
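  # Illustrative tracing sequence (hypothetical caller; `config` is assumed to
  # be a tracing_config.TracingConfig with enable_chrome_trace set):
  #
  #   client.StartChromeTracing(config)
  #   ...  # exercise the browser
  #   client.StopChromeTracing()
  #   client.CollectChromeTracingData(trace_data_builder)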
  # This call may be made early during browser bringup and may cause the
  # GPU process to launch, which takes a long time in Debug builds and
  # has been seen to frequently exceed the default 10s timeout used
  # throughout this file. Use a larger timeout by default. Callers
  # typically do not override this.
  def GetSystemInfo(self, timeout=_FIRST_CALL_TIMEOUT):
    self._CreateSystemInfoBackendIfNeeded()
    return self._system_info_backend.GetSystemInfo(timeout)
  def DumpMemory(self, timeout=None, detail_level=None):
    """Dumps memory.
    Args:
      timeout: seconds to wait between websocket responses.
      detail_level: Level of detail in memory dump. One of ['detailed',
      'light', 'background']. Defaults to 'detailed'.
    Returns:
      GUID of the generated dump if successful, None otherwise.
    Raises:
      TracingTimeoutException: If more than |timeout| seconds have passed
      since the last time any data is received.
      TracingUnrecoverableException: If there is a websocket error.
      TracingUnexpectedResponseException: If the response contains an error
      or does not contain the expected result.
    """
    if not self._tracing_backend:
      return None
    return self._tracing_backend.DumpMemory(
        timeout=timeout,
        detail_level=detail_level)
  def SetMemoryPressureNotificationsSuppressed(self, suppressed, timeout=30):
    """Enable/disable suppressing memory pressure notifications.
    Args:
      suppressed: If true, memory pressure notifications will be suppressed.
      timeout: The timeout in seconds.
    Raises:
      MemoryTimeoutException: If more than |timeout| seconds have passed
      since the last time any data is received.
      MemoryUnrecoverableException: If there is a websocket error.
      MemoryUnexpectedResponseException: If the response contains an error
      or does not contain the expected result.
    """
    self._CreateMemoryBackendIfNeeded()
    return self._memory_backend.SetMemoryPressureNotificationsSuppressed(
        suppressed, timeout)
  def SimulateMemoryPressureNotification(self, pressure_level, timeout=30):
    """Simulate a memory pressure notification.
    Args:
      pressure_level: The memory pressure level of the notification
        ('moderate' or 'critical').
      timeout: The timeout in seconds.
    Raises:
      MemoryTimeoutException: If more than |timeout| seconds have passed
      since the last time any data is received.
      MemoryUnrecoverableException: If there is a websocket error.
      MemoryUnexpectedResponseException: If the response contains an error
      or does not contain the expected result.
    """
    self._CreateMemoryBackendIfNeeded()
    return self._memory_backend.SimulateMemoryPressureNotification(
        pressure_level, timeout)
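  # Illustrative use of the memory helpers (hypothetical caller): suppress the
  # real notifications, then inject a synthetic one:
  #
  #   client.SetMemoryPressureNotificationsSuppressed(True)
  #   client.SimulateMemoryPressureNotification('critical')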
  @property
  def window_manager_backend(self):
    """Return the window manager backend.
    This should be called by a CrOS backend only.
    """
    self._CreateWindowManagerBackendIfNeeded()
    return self._wm_backend
  def ExecuteBrowserCommand(self, command_id, timeout):
    request = {
        'method': 'Browser.executeBrowserCommand',
        'params': {
            'commandId': command_id,
        }
    }
    self._browser_websocket.SyncRequest(request, timeout)
  def SetDownloadBehavior(self, behavior, downloadPath, timeout):
    request = {
        'method': 'Browser.setDownloadBehavior',
        'params': {
            'behavior': behavior,
            'downloadPath': downloadPath,
        }
    }
    self._browser_websocket.SyncRequest(request, timeout)
  def GetWindowForTarget(self, target_id):
    request = {
        'method': 'Browser.getWindowForTarget',
        'params': {
            'targetId': target_id
        }
    }
    return self._browser_websocket.SyncRequest(request, timeout=30)
  def SetWindowBounds(self, window_id, bounds):
    request = {
        'method': 'Browser.setWindowBounds',
        'params': {
            'windowId': window_id,
            'bounds': bounds
        }
    }
    self._browser_websocket.SyncRequest(request, timeout=30)
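  # Illustrative window manipulation (hypothetical caller): the window id is
  # assumed to be returned under 'result'/'windowId', per the DevTools
  # Browser.getWindowForTarget method:
  #
  #   window = client.GetWindowForTarget(tab_id)
  #   window_id = window['result']['windowId']
  #   client.SetWindowBounds(window_id,
  #                          {'left': 0, 'top': 0, 'width': 800, 'height': 600})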
class _DevToolsContextMapBackend(object):
  def __init__(self, devtools_client):
    self._devtools_client = devtools_client
    self._contexts = None
    self._inspector_backends_dict = {}
  @property
  def contexts(self):
    """The most up to date contexts data.
    Returned in the order returned by devtools agent."""
    return self._contexts
  def GetContextInfo(self, context_id):
    for context in self._contexts:
      if context['id'] == context_id:
        return context
    raise KeyError('Cannot find a context with id=%s' % context_id)
  def GetInspectorBackend(self, context_id):
    """Gets an InspectorBackend instance for the given context_id.
    This lazily creates InspectorBackend for the context_id if it does
    not exist yet. Otherwise, it will return the cached instance."""
    if context_id in self._inspector_backends_dict:
      return self._inspector_backends_dict[context_id]
    for context in self._contexts:
      if context['id'] == context_id:
        new_backend = inspector_backend.InspectorBackend(
            self._devtools_client, context)
        self._inspector_backends_dict[context_id] = new_backend
        return new_backend
    raise KeyError('Cannot find a context with id=%s' % context_id)
  def _Update(self, contexts):
    # Remove InspectorBackend that is not in the current inspectable
    # contexts list.
    context_ids = [context['id'] for context in contexts]
    for context_id in list(self._inspector_backends_dict.keys()):
      if context_id not in context_ids:
        backend = self._inspector_backends_dict[context_id]
        backend.Disconnect()
        del self._inspector_backends_dict[context_id]
    valid_contexts = []
    for context in contexts:
      # If the context does not have webSocketDebuggerUrl, skip it.
      # If an InspectorBackend is already created for the tab,
      # webSocketDebuggerUrl will be missing, and this is expected.
      context_id = context['id']
      if context_id not in self._inspector_backends_dict:
        if 'webSocketDebuggerUrl' not in context:
          logging.debug('webSocketDebuggerUrl missing, removing %s',
                        context_id)
          continue
      valid_contexts.append(context)
    self._contexts = valid_contexts
  def Clear(self):
    for backend in self._inspector_backends_dict.values():
      backend.Disconnect()
    self._inspector_backends_dict = {}
    self._contexts = None
 | 
	bsd-3-clause | 6,397,773,995,444,545,000 | 34.244337 | 100 | 0.696662 | false | 
| 
	v-zhongz/azure-linux-extensions | 
	VMBackup/main/Utils/WAAgentUtil.py | 
	11 | 
	2528 | 
	# Wrapper module for waagent
#
# waagent is not written as a module. This wrapper module is created 
# to use the waagent code as a module.
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import imp
import os
import os.path
#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
    agentPath = '/usr/sbin/waagent'
    if(os.path.isfile(agentPath)):
        return agentPath
    user_paths = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    for user_path in user_paths:
        agentPath = os.path.join(user_path, 'waagent')
        if(os.path.isfile(agentPath)):
            return agentPath
    return None
agentPath = searchWAAgent()
if(agentPath):
    waagent = imp.load_source('waagent', agentPath)
else:
    raise Exception("Can't load waagent.")
if not hasattr(waagent, "AddExtensionEvent"):
    """
    If AddExtensionEvent is not defined, provide a dummy impl.
    """
    def _AddExtensionEvent(*args, **kwargs):
        pass
    waagent.AddExtensionEvent = _AddExtensionEvent
if not hasattr(waagent, "WALAEventOperation"):
    class _WALAEventOperation:
        HeartBeat = "HeartBeat"
        Provision = "Provision"
        Install = "Install"
        UnInstall = "UnInstall"
        Disable = "Disable"
        Enable = "Enable"
        Download = "Download"
        Upgrade = "Upgrade"
        Update = "Update"           
    waagent.WALAEventOperation = _WALAEventOperation
__ExtensionName__ = None
def InitExtensionEventLog(name):
    # Record the extension name for subsequent AddExtensionEvent calls.
    global __ExtensionName__
    __ExtensionName__ = name
def AddExtensionEvent(name=None,
                      op=waagent.WALAEventOperation.Enable,
                      isSuccess=False,
                      message=None):
    # Fall back to the name registered via InitExtensionEventLog; the default
    # is resolved at call time so the initialization order does not matter.
    if name is None:
        name = __ExtensionName__
    if name is not None:
        waagent.AddExtensionEvent(name=name,
                                  op=op,
                                  isSuccess=isSuccess,
                                  message=message)
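# Illustrative usage from an extension handler (values are hypothetical):
#
#   InitExtensionEventLog("VMBackup")
#   AddExtensionEvent(op=waagent.WALAEventOperation.Enable,
#                     isSuccess=True,
#                     message="Backup completed")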
 | 
	apache-2.0 | 1,095,373,671,504,539,800 | 29.804878 | 74 | 0.650831 | false | 
| 
	ibinti/intellij-community | 
	python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/topology.py | 
	311 | 
	2226 | 
	"""
 This module houses the GEOS ctypes prototype functions for the
 topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
           'geos_difference', 'geos_envelope', 'geos_intersection', 
           'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
           'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
    "For GEOS unary topology functions."
    argtypes = [GEOM_PTR]
    if args: argtypes += args
    func.argtypes = argtypes
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func
### Topology Routines ###
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string
# Routines only in GEOS 3.1+
if GEOS_PREPARE:
    geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
    geos_cascaded_union.argtypes = [GEOM_PTR]
    geos_cascaded_union.restype = GEOM_PTR
    __all__.append('geos_cascaded_union')
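# Illustrative call, assuming `ptr` is a valid GEOM_PTR owned by a GEOSGeometry:
#
#   buffered = geos_buffer(ptr, 5.0, 8)  # width=5.0, 8 segments per quadrant
#
# The errcheck hook (check_geom) validates the returned pointer, so callers
# get an exception rather than a NULL geometry on failure.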
 | 
	apache-2.0 | -7,917,583,549,610,253,000 | 42.647059 | 84 | 0.743935 | false | 